From 266ebec1b1d83e0d0971f200ef86b882fa6fbc11 Mon Sep 17 00:00:00 2001
From: Brian Mouncer <>
Date: Wed, 8 Jan 2025 19:36:43 -0800
Subject: [PATCH 1/2] push all samples changes back to public GH repository.
---
.../cpp/macos/from-microphone/README.md | 2 +-
quickstart/cpp/macos/text-to-speech/README.md | 2 +-
quickstart/cpp/windows/from-file/README.md | 8 +-
.../from-file/helloworld/helloworld.vcxproj | 4 +-
.../from-file/helloworld/packages.config | 2 +-
.../cpp/windows/from-microphone/README.md | 9 +-
.../helloworld/helloworld.vcxproj | 4 +-
.../helloworld/packages.config | 2 +-
.../cpp/windows/intent-recognition/README.md | 7 +-
.../helloworld/helloworld.vcxproj | 4 +-
.../helloworld/packages.config | 2 +-
.../multi-device-conversation/README.md | 9 +-
.../helloworld/helloworld.vcxproj | 4 +-
.../helloworld/packages.config | 2 +-
.../cpp/windows/speaker-recognition/README.md | 6 +-
.../helloworld/helloworld.vcxproj | 4 +-
.../helloworld/packages.config | 2 +-
.../cpp/windows/text-to-speech/README.md | 9 +-
.../helloworld/helloworld.vcxproj | 4 +-
.../text-to-speech/helloworld/packages.config | 2 +-
.../translate-speech-to-text/README.md | 9 +-
.../helloworld/helloworld.vcxproj | 4 +-
.../helloworld/packages.config | 2 +-
.../conversation-transcription/README.md | 9 +-
.../helloworld/helloworld.csproj | 2 +-
quickstart/csharp/dotnet/from-file/README.md | 9 +-
.../from-file/helloworld/helloworld.csproj | 2 +-
.../from-file/helloworld/packages.config | 2 +-
.../csharp/dotnet/from-microphone/README.md | 9 +-
.../helloworld/helloworld.csproj | 2 +-
.../helloworld/packages.config | 2 +-
.../dotnet/intent-recognition/README.md | 9 +-
.../helloworld/helloworld.csproj | 2 +-
.../helloworld/packages.config | 2 +-
.../dotnet/meeting-transcription/README.md | 9 +-
.../helloworld/helloworld.csproj | 2 +-
.../multi-device-conversation/README.md | 9 +-
.../helloworld/helloworld.csproj | 2 +-
.../helloworld/packages.config | 2 +-
.../dotnet/speaker-recognition/README.md | 9 +-
.../helloworld/helloworld.csproj | 2 +-
.../helloworld/packages.config | 2 +-
.../csharp/dotnet/text-to-speech/README.md | 9 +-
.../helloworld/helloworld.csproj | 2 +-
.../text-to-speech/helloworld/packages.config | 2 +-
.../dotnet/translate-speech-to-text/README.md | 9 +-
.../helloworld/helloworld.csproj | 2 +-
.../helloworld/packages.config | 2 +-
.../dotnetcore/from-microphone/README.md | 23 +-
.../helloworld/helloworld.csproj | 4 +-
.../dotnetcore/text-to-speech/README.md | 25 +-
.../helloworld/helloworld.csproj | 4 +-
.../translate-speech-to-text/README.md | 25 +-
.../helloworld/helloworld.csproj | 4 +-
.../csharp/uwp/from-microphone/README.md | 11 +-
.../helloworld/helloworld.csproj | 2 +-
.../csharp/uwp/keyword-recognizer/README.md | 10 +-
.../helloworld/helloworld.csproj | 2 +-
.../csharp/uwp/text-to-speech/README.md | 11 +-
.../helloworld/helloworld.csproj | 2 +-
.../uwp/translate-speech-to-text/README.md | 11 +-
.../helloworld/helloworld.csproj | 2 +-
.../csharp/uwp/virtual-assistant/README.md | 6 +-
.../uwp/virtual-assistant/helloworld.csproj | 2 +-
.../java/android/from-microphone/README.md | 6 +-
.../android/from-microphone/app/build.gradle | 2 +-
.../java/android/intent-recognition/README.md | 6 +-
.../intent-recognition/app/build.gradle | 2 +-
.../keyword-recognizer-stream/README.md | 6 +-
.../app/build.gradle | 2 +-
.../java/android/keyword-recognizer/README.md | 6 +-
.../keyword-recognizer/app/build.gradle | 2 +-
.../java/android/text-to-speech/README.md | 6 +-
.../android/text-to-speech/app/build.gradle | 2 +-
.../jre/conversation-transcription/README.md | 9 +-
.../jre/conversation-transcription/pom.xml | 2 +-
quickstart/java/jre/from-microphone/README.md | 9 +-
quickstart/java/jre/from-microphone/pom.xml | 2 +-
.../java/jre/intent-recognition/README.md | 9 +-
.../java/jre/intent-recognition/pom.xml | 2 +-
.../java/jre/meeting-transcription/README.md | 9 +-
.../java/jre/meeting-transcription/pom.xml | 2 +-
.../java/jre/speaker-recognition/README.md | 9 +-
.../java/jre/speaker-recognition/pom.xml | 2 +-
quickstart/java/jre/text-to-speech/README.md | 9 +-
quickstart/java/jre/text-to-speech/pom.xml | 2 +-
.../jre/translate-speech-to-text/README.md | 9 +-
.../java/jre/translate-speech-to-text/pom.xml | 2 +-
.../java/jre/virtual-assistant/README.md | 9 +-
quickstart/java/jre/virtual-assistant/pom.xml | 13 +-
.../conversation-transcription/package.json | 2 +-
.../javascript/node/from-file/package.json | 2 +-
.../node/meeting-transcription/package.json | 2 +-
.../identification/package.json | 2 +-
.../verification/package.json | 2 +-
.../node/text-to-speech/package.json | 2 +-
.../objectivec/ios/from-microphone/README.md | 2 +-
.../objectivec/ios/text-to-speech/README.md | 2 +-
.../macos/from-microphone/README.md | 2 +-
.../macos/from-microphone/helloworld/Podfile | 2 +-
.../objectivec/macos/text-to-speech/README.md | 2 +-
.../macos/text-to-speech/helloworld/Podfile | 2 +-
quickstart/python/from-microphone/README.md | 5 +-
.../python/intent-recognition/README.md | 5 +-
quickstart/python/text-to-speech/README.md | 5 +-
.../swift/ios/from-microphone/README.md | 2 +-
quickstart/swift/ios/text-to-speech/README.md | 2 +-
.../swift/macos/from-microphone/README.md | 2 +-
.../swift/macos/text-to-speech/README.md | 2 +-
.../BatchSynthesisSample.csproj | 2 +-
samples/batch-synthesis/java/pom.xml | 8 +-
.../embedded-speech/samples/packages.config | 10 +-
.../embedded-speech/samples/samples.vcxproj | 20 +-
samples/cpp/windows/console/README.md | 7 +-
.../windows/console/samples/packages.config | 4 +-
.../windows/console/samples/samples.vcxproj | 8 +-
.../csharp/dotnet-windows/console/README.md | 7 +-
.../console/samples/packages.config | 4 +-
.../console/samples/samples.csproj | 6 +-
.../speechtotext-naudio/README.md | 7 +-
.../speechtotext-naudio/packages.config | 2 +-
.../speechtotext-naudio.csproj | 8 +-
.../dotnet-windows/speechtotext-wpf/README.md | 7 +-
.../dotnet-windows/translation-wpf/README.md | 7 +-
samples/csharp/dotnetcore/console/README.md | 23 +-
.../dotnetcore/console/samples/samples.csproj | 8 +-
.../dotnetcore/embedded-speech/README.md | 8 +-
.../embedded-speech/samples/samples.csproj | 12 +-
samples/csharp/maui/embedded-speech/README.md | 6 +-
.../embedded-speech/embedded-speech.csproj | 21 +-
samples/csharp/maui/speech-to-text/README.md | 10 +-
.../speech-to-text/speech-to-text.csproj | 9 +-
.../speech_diagnostics_logging_samples.cs | 2 +-
.../console/TtsTextStreamSample.csproj | 4 +-
samples/csharp/uwp/speechtotext-uwp/README.md | 10 +-
.../speechtotext-uwp/speechtotext-uwp.csproj | 2 +-
samples/csharp/uwp/texttospeech-uwp/README.md | 9 +-
.../texttospeech-uwp/texttospeech-uwp.csproj | 2 +-
.../csharp/uwp/virtualassistant-uwp/README.md | 6 +-
.../VirtualAssistantPreview.csproj | 2 +-
samples/csharp/web/avatar/Avatar.csproj | 2 +-
samples/custom-voice/README.md | 5 +-
.../CustomVoiceSample.csproj | 4 +-
.../ingestion-client/versions.nugets.props | 8 +-
.../java/android/SpeechSynthesis/README.md | 6 +-
.../android/SpeechSynthesis/app/build.gradle | 2 +-
samples/java/android/avatar/README.md | 6 +-
samples/java/android/avatar/app/build.gradle | 2 +-
.../java/android/compressed-input/README.md | 6 +-
.../android/compressed-input/app/build.gradle | 2 +-
.../java/android/embedded-speech/README.md | 2 +-
.../android/embedded-speech/app/build.gradle | 2 +-
samples/java/android/sdkdemo/README.md | 6 +-
samples/java/android/sdkdemo/app/build.gradle | 2 +-
samples/java/jre/console/README.md | 9 +-
samples/java/jre/console/pom.xml | 2 +-
.../speech/samples/console/Main.java | 24 ++
.../SpeechDiagnosticsLoggingSamples.java | 226 ++++++++++++++++++
samples/java/jre/embedded-speech/pom.xml | 2 +-
samples/js/node/package.json | 2 +-
.../kotlin/android/continuous-reco/README.md | 6 +-
.../android/continuous-reco/app/build.gradle | 2 +-
.../android/tts-pause-example/README.md | 6 +-
.../tts-pause-example/app/build.gradle | 2 +-
samples/objective-c/ios/README.md | 2 +-
.../ios/compressed-streams/README.md | 2 +-
.../speech-samples.xcodeproj/project.pbxproj | 2 +-
.../speech-keyword-recognition/README.md | 2 +-
.../helloworld/Podfile | 2 +-
samples/python/console/README.md | 6 +-
samples/python/console/speech_sample.py | 32 ++-
samples/realtime-api-plus/poetry.lock | 14 +-
samples/realtime-api-plus/pyproject.toml | 2 +-
samples/swift/ios/README.md | 2 +-
.../ios/conversation-transcription/README.md | 2 +-
samples/swift/ios/embedded-speech/README.md | 2 +-
.../ios/from-external-microphone/README.md | 2 +-
.../speech-keyword-recognition/README.md | 2 +-
.../helloworld/Podfile | 2 +-
samples/video-translation/csharp/readme.md | 3 +
.../captioning/captioning/captioning.vcxproj | 4 +-
.../captioning/captioning/packages.config | 2 +-
.../call-center/call-center.csproj | 2 +-
.../captioning/captioning/captioning.csproj | 4 +-
scenarios/full-duplex-bot/poetry.lock | 14 +-
scenarios/full-duplex-bot/pyproject.toml | 2 +-
scenarios/java/jre/console/captioning/pom.xml | 2 +-
187 files changed, 661 insertions(+), 571 deletions(-)
create mode 100644 samples/java/jre/console/src/com/microsoft/cognitiveservices/speech/samples/console/SpeechDiagnosticsLoggingSamples.java
diff --git a/quickstart/cpp/macos/from-microphone/README.md b/quickstart/cpp/macos/from-microphone/README.md
index bdcee2c09..f66bdb79b 100644
--- a/quickstart/cpp/macos/from-microphone/README.md
+++ b/quickstart/cpp/macos/from-microphone/README.md
@@ -6,7 +6,7 @@ See the [accompanying article](https://docs.microsoft.com/azure/cognitive-servic
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Mac with a working microphone.
+* A Mac with a working microphone. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-cpp) for details on system requirements and setup.
## Build the sample
diff --git a/quickstart/cpp/macos/text-to-speech/README.md b/quickstart/cpp/macos/text-to-speech/README.md
index febb2c13b..eb2404532 100644
--- a/quickstart/cpp/macos/text-to-speech/README.md
+++ b/quickstart/cpp/macos/text-to-speech/README.md
@@ -5,7 +5,7 @@ This sample demonstrates how to synthesize speech with C++ using the Speech SDK
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Mac with a working speaker.
+* A Mac with a working speaker. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-cpp) for details on system requirements and setup.
## Build the sample
diff --git a/quickstart/cpp/windows/from-file/README.md b/quickstart/cpp/windows/from-file/README.md
index 5bbb58f14..102ebcadd 100644
--- a/quickstart/cpp/windows/from-file/README.md
+++ b/quickstart/cpp/windows/from-file/README.md
@@ -1,20 +1,18 @@
# Quickstart: Recognize speech from a file in C++ for Windows
This sample demonstrates how to recognize speech using wave file as an input with C++ using the Speech SDK for Windows.
-See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/speech-to-text-from-microphone?tabs=dotnet%2Cx-android%2CWindows%2Cjava-runtime%2Cwindowsinstall&pivots=programming-language-cpp) on the SDK documentation page which describes how to build corresponding sample from scratch in Visual Studio 2017.
+See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/speech-to-text-from-microphone?tabs=dotnet%2Cx-android%2CWindows%2Cjava-runtime%2Cwindowsinstall&pivots=programming-language-cpp) on the SDK documentation page which describes how to build corresponding sample from scratch in Visual Studio.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **Desktop development with C++** workload in Visual Studio and the **NuGet package manager** component in Visual Studio.
- You can enable both in **Tools** \> **Get Tools and Features**, under the **Workloads** and **Individual components** tabs, respectively.
+* A Windows PC with [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-cpp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `helloworld.cpp` source:
* Replace the string `YourSubscriptionKey` with your own subscription key.
diff --git a/quickstart/cpp/windows/from-file/helloworld/helloworld.vcxproj b/quickstart/cpp/windows/from-file/helloworld/helloworld.vcxproj
index ded6ab68c..23eaa0d68 100644
--- a/quickstart/cpp/windows/from-file/helloworld/helloworld.vcxproj
+++ b/quickstart/cpp/windows/from-file/helloworld/helloworld.vcxproj
@@ -170,12 +170,12 @@
-
+
This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
-
+
\ No newline at end of file
diff --git a/quickstart/cpp/windows/from-file/helloworld/packages.config b/quickstart/cpp/windows/from-file/helloworld/packages.config
index e51eea91b..b60229a48 100644
--- a/quickstart/cpp/windows/from-file/helloworld/packages.config
+++ b/quickstart/cpp/windows/from-file/helloworld/packages.config
@@ -1,4 +1,4 @@
-
+
\ No newline at end of file
diff --git a/quickstart/cpp/windows/from-microphone/README.md b/quickstart/cpp/windows/from-microphone/README.md
index fdb971564..29cd8fd48 100644
--- a/quickstart/cpp/windows/from-microphone/README.md
+++ b/quickstart/cpp/windows/from-microphone/README.md
@@ -1,21 +1,18 @@
# Quickstart: Recognize speech from a microphone in C++ for Windows
This sample demonstrates how to recognize speech with C++ using the Speech SDK for Windows.
-See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/speech-to-text-from-microphone?tabs=dotnet%2Cx-android%2CWindows%2Cjava-runtime%2Cwindowsinstall&pivots=programming-language-cpp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio 2017.
+See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/speech-to-text-from-microphone?tabs=dotnet%2Cx-android%2CWindows%2Cjava-runtime%2Cwindowsinstall&pivots=programming-language-cpp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC with a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **Desktop development with C++** workload in Visual Studio and the **NuGet package manager** component in Visual Studio.
- You can enable both in **Tools** \> **Get Tools and Features**, under the **Workloads** and **Individual components** tabs, respectively.
+* A Windows PC with a working microphone and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-cpp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `helloworld.cpp` source:
* Replace the string `YourSubscriptionKey` with your own subscription key.
diff --git a/quickstart/cpp/windows/from-microphone/helloworld/helloworld.vcxproj b/quickstart/cpp/windows/from-microphone/helloworld/helloworld.vcxproj
index 12627cda7..56dd81560 100644
--- a/quickstart/cpp/windows/from-microphone/helloworld/helloworld.vcxproj
+++ b/quickstart/cpp/windows/from-microphone/helloworld/helloworld.vcxproj
@@ -166,12 +166,12 @@
-
+
This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
-
+
\ No newline at end of file
diff --git a/quickstart/cpp/windows/from-microphone/helloworld/packages.config b/quickstart/cpp/windows/from-microphone/helloworld/packages.config
index e51eea91b..b60229a48 100644
--- a/quickstart/cpp/windows/from-microphone/helloworld/packages.config
+++ b/quickstart/cpp/windows/from-microphone/helloworld/packages.config
@@ -1,4 +1,4 @@
-
+
\ No newline at end of file
diff --git a/quickstart/cpp/windows/intent-recognition/README.md b/quickstart/cpp/windows/intent-recognition/README.md
index 29e8e0c73..2736f5120 100644
--- a/quickstart/cpp/windows/intent-recognition/README.md
+++ b/quickstart/cpp/windows/intent-recognition/README.md
@@ -5,16 +5,13 @@ This sample demonstrates how to recognize intent with C++ using the Speech SDK f
## Prerequisites
* A LUIS account. You can get one for free through the [LUIS portal](https://www.luis.ai/home).
-* A Windows PC with a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **Desktop development with C++** workload in Visual Studio and the **NuGet package manager** component in Visual Studio.
- You can enable both in **Tools** \> **Get Tools and Features**, under the **Workloads** and **Individual components** tabs, respectively.
+* A Windows PC with a working microphone and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-cpp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `helloworld.cpp` source:
* Replace the string `YourLanguageUnderstandingSubscriptionKey` with your own LUIS endpoint key.
diff --git a/quickstart/cpp/windows/intent-recognition/helloworld/helloworld.vcxproj b/quickstart/cpp/windows/intent-recognition/helloworld/helloworld.vcxproj
index 12627cda7..56dd81560 100644
--- a/quickstart/cpp/windows/intent-recognition/helloworld/helloworld.vcxproj
+++ b/quickstart/cpp/windows/intent-recognition/helloworld/helloworld.vcxproj
@@ -166,12 +166,12 @@
-
+
This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
-
+
\ No newline at end of file
diff --git a/quickstart/cpp/windows/intent-recognition/helloworld/packages.config b/quickstart/cpp/windows/intent-recognition/helloworld/packages.config
index e51eea91b..b60229a48 100644
--- a/quickstart/cpp/windows/intent-recognition/helloworld/packages.config
+++ b/quickstart/cpp/windows/intent-recognition/helloworld/packages.config
@@ -1,4 +1,4 @@
-
+
\ No newline at end of file
diff --git a/quickstart/cpp/windows/multi-device-conversation/README.md b/quickstart/cpp/windows/multi-device-conversation/README.md
index c93c24107..5db6b3290 100644
--- a/quickstart/cpp/windows/multi-device-conversation/README.md
+++ b/quickstart/cpp/windows/multi-device-conversation/README.md
@@ -1,20 +1,17 @@
# Quickstart: Multi-Device Conversation, C++ (Windows) - Speech Service
In this quickstart, you'll learn how to create a new multi-device conversation or join an existing one with C++ using the Speech SDK for Windows.
-See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/Speech-Service/quickstarts/multi-device-conversation?pivots=programming-language-cpp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio 2017.
+See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/Speech-Service/quickstarts/multi-device-conversation?pivots=programming-language-cpp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC with a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **Desktop development with C++** workload in Visual Studio and the **NuGet package manager** component in Visual Studio.
- You can enable both in **Tools** \> **Get Tools and Features**, under the **Workloads** and **Individual components** tabs, respectively.
+* A Windows PC with a working microphone and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-cpp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `helloworld.cpp` source:
* Replace the string `YourSubscriptionKey` with your own subscription key.
diff --git a/quickstart/cpp/windows/multi-device-conversation/helloworld/helloworld.vcxproj b/quickstart/cpp/windows/multi-device-conversation/helloworld/helloworld.vcxproj
index f300d154d..c14637b8c 100644
--- a/quickstart/cpp/windows/multi-device-conversation/helloworld/helloworld.vcxproj
+++ b/quickstart/cpp/windows/multi-device-conversation/helloworld/helloworld.vcxproj
@@ -158,12 +158,12 @@
-
+
This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
-
+
\ No newline at end of file
diff --git a/quickstart/cpp/windows/multi-device-conversation/helloworld/packages.config b/quickstart/cpp/windows/multi-device-conversation/helloworld/packages.config
index e51eea91b..b60229a48 100644
--- a/quickstart/cpp/windows/multi-device-conversation/helloworld/packages.config
+++ b/quickstart/cpp/windows/multi-device-conversation/helloworld/packages.config
@@ -1,4 +1,4 @@
-
+
\ No newline at end of file
diff --git a/quickstart/cpp/windows/speaker-recognition/README.md b/quickstart/cpp/windows/speaker-recognition/README.md
index 8b387fb13..93b3ccd0c 100644
--- a/quickstart/cpp/windows/speaker-recognition/README.md
+++ b/quickstart/cpp/windows/speaker-recognition/README.md
@@ -7,15 +7,13 @@ In this quickstart, you'll learn how to do speaker recognition, including creati
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* [Microsoft Visual Studio 2019](https://www.visualstudio.com/), Community Edition or higher.
-* The **Desktop development with C++** workload in Visual Studio and the **NuGet package manager** component in Visual Studio.
- You can enable both in **Tools** \> **Get Tools and Features**, under the **Workloads** and **Individual components** tabs, respectively.
+* A Windows PC with [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-cpp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2019 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `helloworld.cpp` source:
* Replace the string `YourSubscriptionKey` with your own subscription key.
diff --git a/quickstart/cpp/windows/speaker-recognition/helloworld/helloworld.vcxproj b/quickstart/cpp/windows/speaker-recognition/helloworld/helloworld.vcxproj
index 6abd5d485..0f6f44f56 100644
--- a/quickstart/cpp/windows/speaker-recognition/helloworld/helloworld.vcxproj
+++ b/quickstart/cpp/windows/speaker-recognition/helloworld/helloworld.vcxproj
@@ -175,12 +175,12 @@
-
+
This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
-
+
\ No newline at end of file
diff --git a/quickstart/cpp/windows/speaker-recognition/helloworld/packages.config b/quickstart/cpp/windows/speaker-recognition/helloworld/packages.config
index e51eea91b..b60229a48 100644
--- a/quickstart/cpp/windows/speaker-recognition/helloworld/packages.config
+++ b/quickstart/cpp/windows/speaker-recognition/helloworld/packages.config
@@ -1,4 +1,4 @@
-
+
\ No newline at end of file
diff --git a/quickstart/cpp/windows/text-to-speech/README.md b/quickstart/cpp/windows/text-to-speech/README.md
index d8c3fcdc0..b4332eccb 100644
--- a/quickstart/cpp/windows/text-to-speech/README.md
+++ b/quickstart/cpp/windows/text-to-speech/README.md
@@ -1,21 +1,18 @@
# Quickstart: Synthesize speech in C++ for Windows
This sample demonstrates how to synthesize speech with C++ using the Speech SDK for Windows.
-See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-text-to-speech-cpp-windows) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio 2017.
+See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-text-to-speech-cpp-windows) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC with a working speaker or headset.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **Desktop development with C++** workload in Visual Studio and the **NuGet package manager** component in Visual Studio.
- You can enable both in **Tools** \> **Get Tools and Features**, under the **Workloads** and **Individual components** tabs, respectively.
+* A Windows PC with a working speaker/headset and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-cpp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `helloworld.cpp` source:
* Replace the string `YourSubscriptionKey` with your own subscription key.
diff --git a/quickstart/cpp/windows/text-to-speech/helloworld/helloworld.vcxproj b/quickstart/cpp/windows/text-to-speech/helloworld/helloworld.vcxproj
index 12627cda7..56dd81560 100644
--- a/quickstart/cpp/windows/text-to-speech/helloworld/helloworld.vcxproj
+++ b/quickstart/cpp/windows/text-to-speech/helloworld/helloworld.vcxproj
@@ -166,12 +166,12 @@
-
+
This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
-
+
\ No newline at end of file
diff --git a/quickstart/cpp/windows/text-to-speech/helloworld/packages.config b/quickstart/cpp/windows/text-to-speech/helloworld/packages.config
index e51eea91b..b60229a48 100644
--- a/quickstart/cpp/windows/text-to-speech/helloworld/packages.config
+++ b/quickstart/cpp/windows/text-to-speech/helloworld/packages.config
@@ -1,4 +1,4 @@
-
+
\ No newline at end of file
diff --git a/quickstart/cpp/windows/translate-speech-to-text/README.md b/quickstart/cpp/windows/translate-speech-to-text/README.md
index e89b49850..fb2366bf1 100644
--- a/quickstart/cpp/windows/translate-speech-to-text/README.md
+++ b/quickstart/cpp/windows/translate-speech-to-text/README.md
@@ -1,21 +1,18 @@
# Quickstart: Translate speech in C++ for Windows
This sample demonstrates how to translate speech with C++ using the Speech SDK for Windows.
-See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started-speech-translation?tabs=script%2Cwindowsinstall&pivots=programming-language-cpp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio 2017.
+See the [accompanying article](https://learn.microsoft.com/azure/cognitive-services/speech-service/get-started-speech-translation?tabs=script%2Cwindowsinstall&pivots=programming-language-cpp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC with a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **Desktop development with C++** workload in Visual Studio and the **NuGet package manager** component in Visual Studio.
- You can enable both in **Tools** \> **Get Tools and Features**, under the **Workloads** and **Individual components** tabs, respectively.
+* A Windows PC with a working microphone and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-cpp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `helloworld.cpp` source:
* Replace the string `YourSubscriptionKey` with your own subscription key.
diff --git a/quickstart/cpp/windows/translate-speech-to-text/helloworld/helloworld.vcxproj b/quickstart/cpp/windows/translate-speech-to-text/helloworld/helloworld.vcxproj
index 4fab00933..ab424e036 100644
--- a/quickstart/cpp/windows/translate-speech-to-text/helloworld/helloworld.vcxproj
+++ b/quickstart/cpp/windows/translate-speech-to-text/helloworld/helloworld.vcxproj
@@ -167,12 +167,12 @@
-
+
This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
-
+
\ No newline at end of file
diff --git a/quickstart/cpp/windows/translate-speech-to-text/helloworld/packages.config b/quickstart/cpp/windows/translate-speech-to-text/helloworld/packages.config
index 3a0bf1d48..413069e72 100644
--- a/quickstart/cpp/windows/translate-speech-to-text/helloworld/packages.config
+++ b/quickstart/cpp/windows/translate-speech-to-text/helloworld/packages.config
@@ -1,4 +1,4 @@
-
+
diff --git a/quickstart/csharp/dotnet/conversation-transcription/README.md b/quickstart/csharp/dotnet/conversation-transcription/README.md
index 7cedaf733..5e5b91c37 100644
--- a/quickstart/csharp/dotnet/conversation-transcription/README.md
+++ b/quickstart/csharp/dotnet/conversation-transcription/README.md
@@ -1,21 +1,18 @@
# Quickstart: Transcribe conversations in C# under .NET Framework for Windows
This sample demonstrates how to transcribe conversations with C# under the .NET Framework (version 4.6.1 or above) using the Speech SDK for Windows.
-See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-csharp-dotnet-windows) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio 2017.
+See the [accompanying article](https://learn.microsoft.com/azure/cognitive-services/speech-service/quickstart-csharp-dotnet-windows) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC with a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **.NET desktop development** workload in Visual Studio.
- You can enable it in **Tools** \> **Get Tools and Features**.
+* A Windows PC with a working microphone and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `Program.cs` source:
* Replace the string `YourSubscriptionKey` with your own subscription key. Use the `Speech` resource in Azure (not the `Speech Recognition` resource).
diff --git a/quickstart/csharp/dotnet/conversation-transcription/helloworld/helloworld.csproj b/quickstart/csharp/dotnet/conversation-transcription/helloworld/helloworld.csproj
index 1585cee85..448b84d2e 100644
--- a/quickstart/csharp/dotnet/conversation-transcription/helloworld/helloworld.csproj
+++ b/quickstart/csharp/dotnet/conversation-transcription/helloworld/helloworld.csproj
@@ -102,7 +102,7 @@
- 1.41.1
+ 1.42.0
13.0.1
diff --git a/quickstart/csharp/dotnet/from-file/README.md b/quickstart/csharp/dotnet/from-file/README.md
index 6637dcd46..97a6c0908 100644
--- a/quickstart/csharp/dotnet/from-file/README.md
+++ b/quickstart/csharp/dotnet/from-file/README.md
@@ -1,21 +1,18 @@
# Quickstart: Recognize speech from a file in C# under .NET Framework for Windows
This sample demonstrates how to recognize speech with C# under the .NET Framework (version 4.6.1 or above) using the Speech SDK for Windows.
-See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/speech-to-text-from-microphone?tabs=dotnet%2Cx-android%2Clinux%2Cjava-runtime%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio 2017.
+See the [accompanying article](https://learn.microsoft.com/azure/cognitive-services/speech-service/quickstarts/speech-to-text-from-microphone?tabs=dotnet%2Cx-android%2Clinux%2Cjava-runtime%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC with a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **.NET desktop development** workload in Visual Studio.
- You can enable it in **Tools** \> **Get Tools and Features**.
+* A Windows PC with a working microphone and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `Program.cs` source:
* Replace the string `YourSubscriptionKey` with your own subscription key. Use the `Speech` resource in Azure (not the `Speech Recognition` resource).
diff --git a/quickstart/csharp/dotnet/from-file/helloworld/helloworld.csproj b/quickstart/csharp/dotnet/from-file/helloworld/helloworld.csproj
index 5c729463e..51bdb0663 100644
--- a/quickstart/csharp/dotnet/from-file/helloworld/helloworld.csproj
+++ b/quickstart/csharp/dotnet/from-file/helloworld/helloworld.csproj
@@ -95,7 +95,7 @@
- 1.41.1
+ 1.42.0
diff --git a/quickstart/csharp/dotnet/from-file/helloworld/packages.config b/quickstart/csharp/dotnet/from-file/helloworld/packages.config
index 484b29cc7..5b206bb7a 100644
--- a/quickstart/csharp/dotnet/from-file/helloworld/packages.config
+++ b/quickstart/csharp/dotnet/from-file/helloworld/packages.config
@@ -1,4 +1,4 @@
-
+
\ No newline at end of file
diff --git a/quickstart/csharp/dotnet/from-microphone/README.md b/quickstart/csharp/dotnet/from-microphone/README.md
index 313585cfc..b24205f4c 100644
--- a/quickstart/csharp/dotnet/from-microphone/README.md
+++ b/quickstart/csharp/dotnet/from-microphone/README.md
@@ -1,21 +1,18 @@
# Quickstart: Recognize speech from a microphone in C# under .NET Framework for Windows
This sample demonstrates how to recognize speech with C# under the .NET Framework (version 4.6.1 or above) using the Speech SDK for Windows.
-See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/speech-to-text-from-microphone?tabs=dotnet%2Cx-android%2Clinux%2Candroid%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio 2017.
+See the [accompanying article](https://learn.microsoft.com/azure/cognitive-services/speech-service/quickstarts/speech-to-text-from-microphone?tabs=dotnet%2Cx-android%2Clinux%2Candroid%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC with a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **.NET desktop development** workload in Visual Studio.
- You can enable it in **Tools** \> **Get Tools and Features**.
+* A Windows PC with a working microphone and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `Program.cs` source:
* Replace the string `YourSubscriptionKey` with your own subscription key. Use the `Speech` resource in Azure (not the `Speech Recognition` resource).
diff --git a/quickstart/csharp/dotnet/from-microphone/helloworld/helloworld.csproj b/quickstart/csharp/dotnet/from-microphone/helloworld/helloworld.csproj
index 5c729463e..51bdb0663 100644
--- a/quickstart/csharp/dotnet/from-microphone/helloworld/helloworld.csproj
+++ b/quickstart/csharp/dotnet/from-microphone/helloworld/helloworld.csproj
@@ -95,7 +95,7 @@
- 1.41.1
+ 1.42.0
diff --git a/quickstart/csharp/dotnet/from-microphone/helloworld/packages.config b/quickstart/csharp/dotnet/from-microphone/helloworld/packages.config
index 484b29cc7..5b206bb7a 100644
--- a/quickstart/csharp/dotnet/from-microphone/helloworld/packages.config
+++ b/quickstart/csharp/dotnet/from-microphone/helloworld/packages.config
@@ -1,4 +1,4 @@
-
+
\ No newline at end of file
diff --git a/quickstart/csharp/dotnet/intent-recognition/README.md b/quickstart/csharp/dotnet/intent-recognition/README.md
index eb6ac60cb..4017c37ba 100644
--- a/quickstart/csharp/dotnet/intent-recognition/README.md
+++ b/quickstart/csharp/dotnet/intent-recognition/README.md
@@ -1,21 +1,18 @@
# Quickstart: Recognize speech in C# under .NET Framework for Windows
This sample demonstrates how to recognize speech with C# under the .NET Framework (version 4.6.1 or above) using the Speech SDK for Windows.
-See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/speech-to-text-from-microphone?tabs=dotnet%2Cx-android%2Clinux%2Candroid%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio 2017.
+See the [accompanying article](https://learn.microsoft.com/azure/cognitive-services/speech-service/quickstarts/speech-to-text-from-microphone?tabs=dotnet%2Cx-android%2Clinux%2Candroid%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC with a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **.NET desktop development** workload in Visual Studio.
- You can enable it in **Tools** \> **Get Tools and Features**.
+* A Windows PC with a working microphone and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `Program.cs` source:
* Replace the string `YourSubscriptionKey` with your own subscription key. Use the `Speech` resource in Azure (not the `Speech Recognition` resource).
diff --git a/quickstart/csharp/dotnet/intent-recognition/helloworld/helloworld.csproj b/quickstart/csharp/dotnet/intent-recognition/helloworld/helloworld.csproj
index 74f48ca8f..27d5b3803 100644
--- a/quickstart/csharp/dotnet/intent-recognition/helloworld/helloworld.csproj
+++ b/quickstart/csharp/dotnet/intent-recognition/helloworld/helloworld.csproj
@@ -97,7 +97,7 @@
- 1.41.1
+ 1.42.0
diff --git a/quickstart/csharp/dotnet/intent-recognition/helloworld/packages.config b/quickstart/csharp/dotnet/intent-recognition/helloworld/packages.config
index 484b29cc7..5b206bb7a 100644
--- a/quickstart/csharp/dotnet/intent-recognition/helloworld/packages.config
+++ b/quickstart/csharp/dotnet/intent-recognition/helloworld/packages.config
@@ -1,4 +1,4 @@
-
+
\ No newline at end of file
diff --git a/quickstart/csharp/dotnet/meeting-transcription/README.md b/quickstart/csharp/dotnet/meeting-transcription/README.md
index 8a5c1e660..a80300efb 100644
--- a/quickstart/csharp/dotnet/meeting-transcription/README.md
+++ b/quickstart/csharp/dotnet/meeting-transcription/README.md
@@ -1,21 +1,18 @@
# Quickstart: Transcribe meetings in C# under .NET Framework for Windows
This sample demonstrates how to transcribe meetings with C# under the .NET Framework (version 4.6.1 or above) using the Speech SDK for Windows.
-See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-csharp-dotnet-windows) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio 2017.
+See the [accompanying article](https://learn.microsoft.com/azure/cognitive-services/speech-service/quickstart-csharp-dotnet-windows) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC with a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **.NET desktop development** workload in Visual Studio.
- You can enable it in **Tools** \> **Get Tools and Features**.
+* A Windows PC with a working microphone and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `Program.cs` source:
* Replace the string `YourSubscriptionKey` with your own subscription key. Use the `Speech` resource in Azure (not the `Speech Recognition` resource).
diff --git a/quickstart/csharp/dotnet/meeting-transcription/helloworld/helloworld.csproj b/quickstart/csharp/dotnet/meeting-transcription/helloworld/helloworld.csproj
index 8318c3c12..85d17f597 100644
--- a/quickstart/csharp/dotnet/meeting-transcription/helloworld/helloworld.csproj
+++ b/quickstart/csharp/dotnet/meeting-transcription/helloworld/helloworld.csproj
@@ -110,7 +110,7 @@
- 1.41.1
+ 1.42.0
13.0.1
diff --git a/quickstart/csharp/dotnet/multi-device-conversation/README.md b/quickstart/csharp/dotnet/multi-device-conversation/README.md
index fc01f8853..94c70de5d 100644
--- a/quickstart/csharp/dotnet/multi-device-conversation/README.md
+++ b/quickstart/csharp/dotnet/multi-device-conversation/README.md
@@ -1,20 +1,17 @@
# Quickstart: Multi-Device Conversation, C# (.Net Framework Windows) - Speech Service
In this quickstart, you'll learn how to create a new multi-device conversation or join an existing one with C# under the .NET Framework (version 4.6.1 or above) using the Speech SDK for Windows.
-See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/Speech-Service/quickstarts/multi-device-conversation?pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio 2017.
+See the [accompanying article](https://learn.microsoft.com/azure/cognitive-services/Speech-Service/quickstarts/multi-device-conversation?pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC with a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **.NET desktop development** workload in Visual Studio.
- You can enable it in **Tools** \> **Get Tools and Features**.
+* A Windows PC with a working microphone and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `Program.cs` source:
* Replace the string `YourSubscriptionKey` with your own subscription key. Use the `Speech` resource in Azure (not the `Speech Recognition` resource).
diff --git a/quickstart/csharp/dotnet/multi-device-conversation/helloworld/helloworld.csproj b/quickstart/csharp/dotnet/multi-device-conversation/helloworld/helloworld.csproj
index 8452918b2..afa874a28 100644
--- a/quickstart/csharp/dotnet/multi-device-conversation/helloworld/helloworld.csproj
+++ b/quickstart/csharp/dotnet/multi-device-conversation/helloworld/helloworld.csproj
@@ -74,7 +74,7 @@
- 1.41.1
+ 1.42.0
diff --git a/quickstart/csharp/dotnet/multi-device-conversation/helloworld/packages.config b/quickstart/csharp/dotnet/multi-device-conversation/helloworld/packages.config
index ad3a8933b..71d775594 100644
--- a/quickstart/csharp/dotnet/multi-device-conversation/helloworld/packages.config
+++ b/quickstart/csharp/dotnet/multi-device-conversation/helloworld/packages.config
@@ -1,4 +1,4 @@
-
+
\ No newline at end of file
diff --git a/quickstart/csharp/dotnet/speaker-recognition/README.md b/quickstart/csharp/dotnet/speaker-recognition/README.md
index ffab4794b..2f99ac534 100644
--- a/quickstart/csharp/dotnet/speaker-recognition/README.md
+++ b/quickstart/csharp/dotnet/speaker-recognition/README.md
@@ -1,22 +1,19 @@
# Quickstart: Recognize speakers in C# (.Net Framework Windows) - Speech Service
In this quickstart, you'll learn how to do speaker recognition, including creating a voice profile, enrollment training and speaker verification and identification with C# under the .NET Framework (version 4.6.1 or above) using the Speech SDK for Windows.
-See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/speech-to-text-from-microphone?tabs=dotnet%2Cx-android%2Clinux%2Candroid%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio 2017.
+See the [accompanying article](https://learn.microsoft.com/azure/cognitive-services/speech-service/quickstarts/speech-to-text-from-microphone?tabs=dotnet%2Cx-android%2Clinux%2Candroid%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio.
**Note:** Microsoft limits access to speaker recognition. You can apply for access through the [Azure Cognitive Services speaker recognition limited access review](https://aka.ms/azure-speaker-recognition). For more information, see [Limited access for speaker recognition](https://docs.microsoft.com/legal/cognitive-services/speech-service/speaker-recognition/limited-access-speaker-recognition).
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC.
-* [Microsoft Visual Studio 2019](https://www.visualstudio.com/), Community Edition or higher.
-* The **.NET desktop development** workload in Visual Studio.
- You can enable it in **Tools** \> **Get Tools and Features**.
+* A Windows PC with [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `Program.cs` source:
* Replace the string `YourSubscriptionKey` with your own subscription key. Use the `Speech` resource in Azure (not the `Speech Recognition` resource).
diff --git a/quickstart/csharp/dotnet/speaker-recognition/helloworld/helloworld.csproj b/quickstart/csharp/dotnet/speaker-recognition/helloworld/helloworld.csproj
index f6d133716..9afca35e9 100644
--- a/quickstart/csharp/dotnet/speaker-recognition/helloworld/helloworld.csproj
+++ b/quickstart/csharp/dotnet/speaker-recognition/helloworld/helloworld.csproj
@@ -105,7 +105,7 @@
- 1.41.1
+ 1.42.0
diff --git a/quickstart/csharp/dotnet/speaker-recognition/helloworld/packages.config b/quickstart/csharp/dotnet/speaker-recognition/helloworld/packages.config
index 484b29cc7..5b206bb7a 100644
--- a/quickstart/csharp/dotnet/speaker-recognition/helloworld/packages.config
+++ b/quickstart/csharp/dotnet/speaker-recognition/helloworld/packages.config
@@ -1,4 +1,4 @@
-
+
\ No newline at end of file
diff --git a/quickstart/csharp/dotnet/text-to-speech/README.md b/quickstart/csharp/dotnet/text-to-speech/README.md
index f9c6a9735..fd98a296e 100644
--- a/quickstart/csharp/dotnet/text-to-speech/README.md
+++ b/quickstart/csharp/dotnet/text-to-speech/README.md
@@ -1,21 +1,18 @@
# Quickstart: Synthesize speech in C# under .NET Framework for Windows
This sample demonstrates how to synthesize speech with C# under the .NET Framework (version 4.6.1 or above) using the Speech SDK for Windows.
-See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/text-to-speech-audio-file?tabs=ubuntu%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio 2017.
+See the [accompanying article](https://learn.microsoft.com/azure/cognitive-services/speech-service/quickstarts/text-to-speech-audio-file?tabs=ubuntu%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC with a working speaker or headset.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **.NET desktop development** workload in Visual Studio.
- You can enable it in **Tools** \> **Get Tools and Features**.
+* A Windows PC with a working speaker/headset and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `Program.cs` source:
* Replace the string `YourSubscriptionKey` with your own subscription key.
diff --git a/quickstart/csharp/dotnet/text-to-speech/helloworld/helloworld.csproj b/quickstart/csharp/dotnet/text-to-speech/helloworld/helloworld.csproj
index 36b9df1d8..1e1b5cb8e 100644
--- a/quickstart/csharp/dotnet/text-to-speech/helloworld/helloworld.csproj
+++ b/quickstart/csharp/dotnet/text-to-speech/helloworld/helloworld.csproj
@@ -94,7 +94,7 @@
- 1.41.1
+ 1.42.0
diff --git a/quickstart/csharp/dotnet/text-to-speech/helloworld/packages.config b/quickstart/csharp/dotnet/text-to-speech/helloworld/packages.config
index 484b29cc7..5b206bb7a 100644
--- a/quickstart/csharp/dotnet/text-to-speech/helloworld/packages.config
+++ b/quickstart/csharp/dotnet/text-to-speech/helloworld/packages.config
@@ -1,4 +1,4 @@
-
+
\ No newline at end of file
diff --git a/quickstart/csharp/dotnet/translate-speech-to-text/README.md b/quickstart/csharp/dotnet/translate-speech-to-text/README.md
index d5f3cd30f..843600a0e 100644
--- a/quickstart/csharp/dotnet/translate-speech-to-text/README.md
+++ b/quickstart/csharp/dotnet/translate-speech-to-text/README.md
@@ -1,21 +1,18 @@
# Quickstart: Translate speech in C# for .NET Framework for Windows
This sample demonstrates how to translate speech with C# under the .NET Framework (version 4.6.1 or above) using the Speech SDK for Windows.
-See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started-speech-translation?tabs=script%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio 2017.
+See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started-speech-translation?tabs=script%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC with a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **.NET desktop development** workload in Visual Studio.
- You can enable it in **Tools** \> **Get Tools and Features**.
+* A Windows PC with a working microphone and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `Program.cs` source:
* Replace the string `YourSubscriptionKey` with your own subscription key. Use the `Speech` resource in Azure (not the `Speech Recognition` resource).
diff --git a/quickstart/csharp/dotnet/translate-speech-to-text/helloworld/helloworld.csproj b/quickstart/csharp/dotnet/translate-speech-to-text/helloworld/helloworld.csproj
index b46077c53..e413bc65c 100644
--- a/quickstart/csharp/dotnet/translate-speech-to-text/helloworld/helloworld.csproj
+++ b/quickstart/csharp/dotnet/translate-speech-to-text/helloworld/helloworld.csproj
@@ -76,7 +76,7 @@
- 1.41.1
+ 1.42.0
diff --git a/quickstart/csharp/dotnet/translate-speech-to-text/helloworld/packages.config b/quickstart/csharp/dotnet/translate-speech-to-text/helloworld/packages.config
index 52a742c5c..d3987752e 100644
--- a/quickstart/csharp/dotnet/translate-speech-to-text/helloworld/packages.config
+++ b/quickstart/csharp/dotnet/translate-speech-to-text/helloworld/packages.config
@@ -1,4 +1,4 @@
-
+
diff --git a/quickstart/csharp/dotnetcore/from-microphone/README.md b/quickstart/csharp/dotnetcore/from-microphone/README.md
index 9395af5b4..2e86fde48 100644
--- a/quickstart/csharp/dotnetcore/from-microphone/README.md
+++ b/quickstart/csharp/dotnetcore/from-microphone/README.md
@@ -1,6 +1,6 @@
# Quickstart: Recognize speech from a microphone in C# under .NET Core (Windows, macOS, or Linux)
-This sample demonstrates how to recognize speech with C# under .NET 6.0 (Windows, macOS, or Linux) using the Speech SDK.
+This sample demonstrates how to recognize speech with C# under .NET 8.0 (Windows, macOS, or Linux) using the Speech SDK.
> **Note:**
> We currently only support [specific Linux distributions and target architectures](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-sdk?tabs=linux).
@@ -8,30 +8,21 @@ This sample demonstrates how to recognize speech with C# under .NET 6.0 (Windows
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A PC (Windows x64 or a supported Linux distribution) or Mac (macOS 10.14 or later) with a working microphone.
-* Either one of the following:
- * On Windows:
- * [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
- * The **.NET Core cross-platform development** workload in Visual Studio.
- You can enable it in **Tools** \> **Get Tools and Features**.
- * On Windows, macOS, or Linux:
- * [.NET 6.0](https://dotnet.microsoft.com/download/dotnet/6.0)
- * On Windows you also need the [Microsoft Visual C++ Redistributable for Visual Studio 2015, 2017 and 2019](https://support.microsoft.com/help/2977003/the-latest-supported-visual-c-downloads) for your platform.
-* On Linux, see the [Linux platform requirements](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?tabs=linux&pivots=programming-language-csharp#platform-requirements) for installing the required dependencies.
+* A PC (Windows, Linux, Mac) with a working microphone. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* If you are using Microsoft Visual Studio 2017 on Windows:
- * Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* If you are using Microsoft Visual Studio on Windows:
+ * Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `Program.cs` source:
* Replace the string `YourSubscriptionKey` with your own subscription key.
* Replace the string `YourServiceRegion` with the service region of your subscription.
For example, replace with `westus` if you are using the 30-day free trial subscription.
-* If you are using Microsoft Visual Studio 2017 on Windows, press Ctrl+Shift+B, or select **Build** \> **Build Solution**.
+* If you are using Microsoft Visual Studio on Windows, press Ctrl+Shift+B, or select **Build** \> **Build Solution**.
* If you are using the .NET Core CLI, run the following command from the directory that contains this sample:
```bash
@@ -40,7 +31,7 @@ This sample demonstrates how to recognize speech with C# under .NET 6.0 (Windows
## Run the sample
-### Using Visual Studio 2017
+### Using Visual Studio
To debug the app and then run it, press F5 or use **Debug** \> **Start Debugging**. To run the app without debugging, press Ctrl+F5 or use **Debug** \> **Start Without Debugging**.
@@ -49,7 +40,7 @@ To debug the app and then run it, press F5 or use **Debug** \> **Start Debugging
Run the following command from the directory that contains this sample:
```bash
-dotnet helloworld/bin/Debug/net6.0/helloworld.dll
+dotnet helloworld/bin/Debug/net8.0/helloworld.dll
```
## References
diff --git a/quickstart/csharp/dotnetcore/from-microphone/helloworld/helloworld.csproj b/quickstart/csharp/dotnetcore/from-microphone/helloworld/helloworld.csproj
index b5bef2de2..3b3018113 100644
--- a/quickstart/csharp/dotnetcore/from-microphone/helloworld/helloworld.csproj
+++ b/quickstart/csharp/dotnetcore/from-microphone/helloworld/helloworld.csproj
@@ -2,12 +2,12 @@
Exe
- net6.0
+ net8.0
latest
-
+
diff --git a/quickstart/csharp/dotnetcore/text-to-speech/README.md b/quickstart/csharp/dotnetcore/text-to-speech/README.md
index 41eb5dd9a..9864dbbfe 100644
--- a/quickstart/csharp/dotnetcore/text-to-speech/README.md
+++ b/quickstart/csharp/dotnetcore/text-to-speech/README.md
@@ -1,7 +1,7 @@
# Quickstart: Synthesize speech in C# under .NET Core (Windows or Linux)
-This sample demonstrates how to synthesize speech with C# under .NET 6.0 (Windows or Linux) using the Speech SDK.
-See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/text-to-speech-audio-file?tabs=ubuntu%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio 2017.
+This sample demonstrates how to synthesize speech with C# under .NET 8.0 (Windows or Linux) using the Speech SDK.
+See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/text-to-speech-audio-file?tabs=ubuntu%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio.
> **Note:**
> We currently only support [specific Linux distributions and target architectures](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-sdk?tabs=linux).
@@ -9,30 +9,21 @@ See the [accompanying article](https://docs.microsoft.com/azure/cognitive-servic
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A PC (Windows x64 or a supported Linux distribution) or Mac (macOS 10.14 or later) with a working speaker or headset.
-* Either one of the following:
- * On Windows:
- * [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
- * The **.NET Core cross-platform development** workload in Visual Studio.
- You can enable it in **Tools** \> **Get Tools and Features**.
- * On Windows or Linux:
- * [.NET 6.0](https://dotnet.microsoft.com/download/dotnet/6.0)
- * On Windows you also need the [Microsoft Visual C++ Redistributable for Visual Studio 2015, 2017 and 2019](https://support.microsoft.com/help/2977003/the-latest-supported-visual-c-downloads) for your platform.
-* On Linux, see the [Linux platform requirements](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?tabs=linux&pivots=programming-language-csharp#platform-requirements) for installing the required dependencies.
+* A PC (Windows, Linux, Mac) with a working speaker or headset. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* If you are using Microsoft Visual Studio 2017 on Windows:
- * Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* If you are using Microsoft Visual Studio on Windows:
+ * Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `Program.cs` source:
* Replace the string `YourSubscriptionKey` with your own subscription key.
* Replace the string `YourServiceRegion` with the service region of your subscription.
For example, replace with `westus` if you are using the 30-day free trial subscription.
-* If you are using Microsoft Visual Studio 2017 on Windows, press Ctrl+Shift+B, or select **Build** \> **Build Solution**.
+* If you are using Microsoft Visual Studio on Windows, press Ctrl+Shift+B, or select **Build** \> **Build Solution**.
* If you are using the .NET Core CLI, run the following command from the directory that contains this sample:
```bash
@@ -41,7 +32,7 @@ See the [accompanying article](https://docs.microsoft.com/azure/cognitive-servic
## Run the sample
-### Using Visual Studio 2017
+### Using Visual Studio
To debug the app and then run it, press F5 or use **Debug** \> **Start Debugging**. To run the app without debugging, press Ctrl+F5 or use **Debug** \> **Start Without Debugging**.
@@ -50,7 +41,7 @@ To debug the app and then run it, press F5 or use **Debug** \> **Start Debugging
Run the following command from the directory that contains this sample:
```bash
-dotnet helloworld/bin/Debug/net6.0/helloworld.dll
+dotnet helloworld/bin/Debug/net8.0/helloworld.dll
```
## References
diff --git a/quickstart/csharp/dotnetcore/text-to-speech/helloworld/helloworld.csproj b/quickstart/csharp/dotnetcore/text-to-speech/helloworld/helloworld.csproj
index b5bef2de2..3b3018113 100644
--- a/quickstart/csharp/dotnetcore/text-to-speech/helloworld/helloworld.csproj
+++ b/quickstart/csharp/dotnetcore/text-to-speech/helloworld/helloworld.csproj
@@ -2,12 +2,12 @@
Exe
- net6.0
+ net8.0
latest
-
+
diff --git a/quickstart/csharp/dotnetcore/translate-speech-to-text/README.md b/quickstart/csharp/dotnetcore/translate-speech-to-text/README.md
index 19e26984b..16ec850c5 100644
--- a/quickstart/csharp/dotnetcore/translate-speech-to-text/README.md
+++ b/quickstart/csharp/dotnetcore/translate-speech-to-text/README.md
@@ -1,7 +1,7 @@
# Quickstart: Translate speech in C# for .NET Core on Windows or Linux
-This sample demonstrates how to translate speech with C# under .NET 6.0 (Windows or Linux) using the Speech SDK.
-See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started-speech-translation?tabs=script%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio 2017.
+This sample demonstrates how to translate speech with C# under .NET 8.0 (Windows or Linux) using the Speech SDK.
+See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started-speech-translation?tabs=script%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio.
> **Note:**
> We currently only support [specific Linux distributions and target architectures](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-sdk?tabs=linux).
@@ -9,30 +9,21 @@ See the [accompanying article](https://docs.microsoft.com/azure/cognitive-servic
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A PC (Windows x64 or a supported Linux distribution) with a working microphone.
-* Either one of the following:
- * On Windows:
- * [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
- * The **.NET Core cross-platform development** workload in Visual Studio.
- You can enable it in **Tools** \> **Get Tools and Features**.
- * On Windows or Linux:
- * [.NET 6.0](https://dotnet.microsoft.com/download/dotnet/6.0)
- * On Windows you also need the [Microsoft Visual C++ Redistributable for Visual Studio 2015, 2017 and 2019](https://support.microsoft.com/help/2977003/the-latest-supported-visual-c-downloads) for your platform.
-* On Linux, see the [Linux platform requirements](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?tabs=linux&pivots=programming-language-csharp#platform-requirements) for installing the required dependencies.
+* A PC (Windows, Linux, Mac) with a working microphone. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* If you are using Microsoft Visual Studio 2017 on Windows:
- * Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* If you are using Microsoft Visual Studio on Windows:
+ * Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `Program.cs` source:
* Replace the string `YourSubscriptionKey` with your own subscription key.
* Replace the string `YourServiceRegion` with the service region of your subscription.
For example, replace with `westus` if you are using the 30-day free trial subscription.
-* If you are using Microsoft Visual Studio 2017 on Windows, press Ctrl+Shift+B, or select **Build** \> **Build Solution**.
+* If you are using Microsoft Visual Studio on Windows, press Ctrl+Shift+B, or select **Build** \> **Build Solution**.
* If you are using the .NET Core CLI, run the following command from the directory that contains this sample:
```bash
@@ -41,7 +32,7 @@ See the [accompanying article](https://docs.microsoft.com/azure/cognitive-servic
## Run the sample
-### Using Visual Studio 2017
+### Using Visual Studio
To debug the app and then run it, press F5 or use **Debug** \> **Start Debugging**. To run the app without debugging, press Ctrl+F5 or use **Debug** \> **Start Without Debugging**.
@@ -50,7 +41,7 @@ To debug the app and then run it, press F5 or use **Debug** \> **Start Debugging
Run the following command from the directory that contains this sample:
```bash
-dotnet helloworld/bin/Debug/net6.0/helloworld.dll
+dotnet helloworld/bin/Debug/net8.0/helloworld.dll
```
## References
diff --git a/quickstart/csharp/dotnetcore/translate-speech-to-text/helloworld/helloworld.csproj b/quickstart/csharp/dotnetcore/translate-speech-to-text/helloworld/helloworld.csproj
index b5bef2de2..3b3018113 100644
--- a/quickstart/csharp/dotnetcore/translate-speech-to-text/helloworld/helloworld.csproj
+++ b/quickstart/csharp/dotnetcore/translate-speech-to-text/helloworld/helloworld.csproj
@@ -2,12 +2,12 @@
Exe
- net6.0
+ net8.0
latest
-
+
diff --git a/quickstart/csharp/uwp/from-microphone/README.md b/quickstart/csharp/uwp/from-microphone/README.md
index 092c5d2e9..f1014070c 100644
--- a/quickstart/csharp/uwp/from-microphone/README.md
+++ b/quickstart/csharp/uwp/from-microphone/README.md
@@ -1,23 +1,18 @@
# Quickstart: Recognize speech from a microphone in C# under Universal Windows Platform
This sample demonstrates how to recognize speech with C# under the Universal Windows Platform using the Speech SDK.
-See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/speech-to-text-from-microphone?tabs=uwp%2Cx-android%2Clinux%2Candroid%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio 2017.
+See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/speech-to-text-from-microphone?tabs=uwp%2Cx-android%2Clinux%2Candroid%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC with Windows 10 Fall Creators Update (10.0; Build 16299) or later and with a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **Universal Windows Platform development** workload in Visual Studio.
-* Note: processor target ARM or ARM64 is not yet supported.
-
- You can enable it in **Tools** \> **Get Tools and Features**.
+* A Windows PC with a working microphone and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `MainPage.xaml.cs` source:
* Replace the string `YourSubscriptionKey` with your own subscription key.
diff --git a/quickstart/csharp/uwp/from-microphone/helloworld/helloworld.csproj b/quickstart/csharp/uwp/from-microphone/helloworld/helloworld.csproj
index 1bddb2d6f..4dad54b71 100644
--- a/quickstart/csharp/uwp/from-microphone/helloworld/helloworld.csproj
+++ b/quickstart/csharp/uwp/from-microphone/helloworld/helloworld.csproj
@@ -106,7 +106,7 @@
- 1.41.1
+ 1.42.0
6.2.8
diff --git a/quickstart/csharp/uwp/keyword-recognizer/README.md b/quickstart/csharp/uwp/keyword-recognizer/README.md
index 0a4042b36..a7d4646ac 100644
--- a/quickstart/csharp/uwp/keyword-recognizer/README.md
+++ b/quickstart/csharp/uwp/keyword-recognizer/README.md
@@ -1,21 +1,17 @@
# Quickstart: Recognize keywords in C# under Universal Windows Platform
This sample demonstrates how to recognize keywords with C# under the Universal Windows Platform using the Speech SDK.
-See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/speech-to-text-from-microphone?tabs=uwp%2Cx-android%2Clinux%2Candroid%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio 2017.
+See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/speech-to-text-from-microphone?tabs=uwp%2Cx-android%2Clinux%2Candroid%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio.
## Prerequisites
-* A Windows PC with Windows 10 Fall Creators Update (10.0; Build 16299) or later and with a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **Universal Windows Platform development** workload in Visual Studio.
-
- You can enable it in **Tools** \> **Get Tools and Features**.
+* A Windows PC with a working microphone and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Set the active solution configuration and platform to the desired values under **Build** \> **Configuration Manager**:
* On a 64-bit Windows installation, choose `x64` as active solution platform.
diff --git a/quickstart/csharp/uwp/keyword-recognizer/helloworld/helloworld.csproj b/quickstart/csharp/uwp/keyword-recognizer/helloworld/helloworld.csproj
index 5edefd4e9..481cf5f59 100644
--- a/quickstart/csharp/uwp/keyword-recognizer/helloworld/helloworld.csproj
+++ b/quickstart/csharp/uwp/keyword-recognizer/helloworld/helloworld.csproj
@@ -154,7 +154,7 @@
- 1.41.1
+ 1.42.0
6.2.9
diff --git a/quickstart/csharp/uwp/text-to-speech/README.md b/quickstart/csharp/uwp/text-to-speech/README.md
index 9173a9b66..144566bb1 100644
--- a/quickstart/csharp/uwp/text-to-speech/README.md
+++ b/quickstart/csharp/uwp/text-to-speech/README.md
@@ -1,23 +1,18 @@
# Quickstart: Synthesize speech in C# under Universal Windows Platform
This sample demonstrates how to synthesize speech with C# under the Universal Windows Platform using the Speech SDK.
-See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/text-to-speech-audio-file?tabs=ubuntu%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio 2017.
+See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/text-to-speech-audio-file?tabs=ubuntu%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC with Windows 10 Fall Creators Update (10.0; Build 16299) or later and with a working speaker/headset.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **Universal Windows Platform development** workload in Visual Studio.
-* Note: processor target ARM or ARM64 is not yet supported.
-
- You can enable it in **Tools** \> **Get Tools and Features**.
+* A Windows PC with a working speaker/headset and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `MainPage.xaml.cs` source:
* Replace the string `YourSubscriptionKey` with your own subscription key.
diff --git a/quickstart/csharp/uwp/text-to-speech/helloworld/helloworld.csproj b/quickstart/csharp/uwp/text-to-speech/helloworld/helloworld.csproj
index 752aa7978..d4b3c0faa 100644
--- a/quickstart/csharp/uwp/text-to-speech/helloworld/helloworld.csproj
+++ b/quickstart/csharp/uwp/text-to-speech/helloworld/helloworld.csproj
@@ -106,7 +106,7 @@
- 1.41.1
+ 1.42.0
6.2.8
diff --git a/quickstart/csharp/uwp/translate-speech-to-text/README.md b/quickstart/csharp/uwp/translate-speech-to-text/README.md
index 771d0f1d4..999890f34 100644
--- a/quickstart/csharp/uwp/translate-speech-to-text/README.md
+++ b/quickstart/csharp/uwp/translate-speech-to-text/README.md
@@ -1,23 +1,18 @@
# Quickstart: Translate speech from a microphone in C# for Universal Windows Platform
This sample demonstrates how to translate speech with C# under the Universal Windows Platform using the Speech SDK.
-See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started-speech-translation?tabs=script%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio 2017.
+See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started-speech-translation?tabs=script%2Cwindowsinstall&pivots=programming-language-csharp) on the SDK documentation page which describes how to build this sample from scratch in Visual Studio.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC with Windows 10 Fall Creators Update (10.0; Build 16299) or later and with a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **Universal Windows Platform development** workload in Visual Studio.
-* Note: processor target ARM or ARM64 is not yet supported.
-
- You can enable it in **Tools** \> **Get Tools and Features**.
+* A Windows PC with a working microphone and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `MainPage.xaml.cs` source:
* Replace the string `YourSubscriptionKey` with your own subscription key.
diff --git a/quickstart/csharp/uwp/translate-speech-to-text/helloworld/helloworld.csproj b/quickstart/csharp/uwp/translate-speech-to-text/helloworld/helloworld.csproj
index e9deec321..9db8403fa 100644
--- a/quickstart/csharp/uwp/translate-speech-to-text/helloworld/helloworld.csproj
+++ b/quickstart/csharp/uwp/translate-speech-to-text/helloworld/helloworld.csproj
@@ -106,7 +106,7 @@
- 1.41.1
+ 1.42.0
6.2.8
diff --git a/quickstart/csharp/uwp/virtual-assistant/README.md b/quickstart/csharp/uwp/virtual-assistant/README.md
index a52d5bf70..73e405c07 100644
--- a/quickstart/csharp/uwp/virtual-assistant/README.md
+++ b/quickstart/csharp/uwp/virtual-assistant/README.md
@@ -7,15 +7,13 @@ See the [accompanying article](https://docs.microsoft.com/azure/cognitive-servic
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
* A previously created bot configured with the [Direct Line Speech channel](https://docs.microsoft.com/azure/bot-service/bot-service-channel-connect-directlinespeech)
-* A Windows PC with Windows 10 Fall Creators Update (10.0; Build 16299) or later, with a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **Universal Windows Platform development** workload in Visual Studio.
+* A Windows PC with a working microphone and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Edit the `MainPage.xaml.cs` source:
* Replace the string `YourChannelSecret` with your own channel secret id.
diff --git a/quickstart/csharp/uwp/virtual-assistant/helloworld.csproj b/quickstart/csharp/uwp/virtual-assistant/helloworld.csproj
index 6765e7a2a..9cfde5391 100644
--- a/quickstart/csharp/uwp/virtual-assistant/helloworld.csproj
+++ b/quickstart/csharp/uwp/virtual-assistant/helloworld.csproj
@@ -153,7 +153,7 @@
- 1.41.1
+ 1.42.0
6.2.9
diff --git a/quickstart/java/android/from-microphone/README.md b/quickstart/java/android/from-microphone/README.md
index 2770308a0..98bf8dca9 100644
--- a/quickstart/java/android/from-microphone/README.md
+++ b/quickstart/java/android/from-microphone/README.md
@@ -6,9 +6,9 @@ See the [accompanying article](https://docs.microsoft.com/azure/cognitive-servic
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A PC (Windows, Linux, Mac) capable to run Android Studio.
-* [Android Studio](https://developer.android.com/studio/).
-* An ARM-based Android device (API 23: Android 6.0 Marshmallow or higher) [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working microphone.
+* A PC (Windows, Linux, Mac) capable of running [Android Studio](https://developer.android.com/studio/).
+* An ARM-based Android device [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working microphone.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-java) for details on system requirements and setup.
## Build the sample
diff --git a/quickstart/java/android/from-microphone/app/build.gradle b/quickstart/java/android/from-microphone/app/build.gradle
index 21cd202a7..ab1f79c0a 100644
--- a/quickstart/java/android/from-microphone/app/build.gradle
+++ b/quickstart/java/android/from-microphone/app/build.gradle
@@ -25,7 +25,7 @@ dependencies {
implementation fileTree(include: ['*.jar'], dir: 'libs')
// Speech SDK
- implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.41.1'
+ implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.42.0'
implementation 'androidx.appcompat:appcompat:1.3.1'
implementation 'androidx.constraintlayout:constraintlayout:2.1.0'
diff --git a/quickstart/java/android/intent-recognition/README.md b/quickstart/java/android/intent-recognition/README.md
index bbecd452d..9a02fe7d8 100644
--- a/quickstart/java/android/intent-recognition/README.md
+++ b/quickstart/java/android/intent-recognition/README.md
@@ -7,9 +7,9 @@ See the [accompanying article](https://docs.microsoft.com/azure/cognitive-servic
* A LUIS account. You can get one for free through the [LUIS portal](https://www.luis.ai/home).
* A new or existing LUIS app - [create LUIS app](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstarts/intent-recognition#create-a-luis-app-for-intent-recognition)
-* A PC (Windows, Linux, Mac) capable to run Android Studio.
-* [Android Studio](https://developer.android.com/studio/).
-* An ARM32, ARM64, or x86-based Android device (API 23: Android 6.0 Marshmallow or higher) [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working microphone.
+* A PC (Windows, Linux, Mac) capable of running [Android Studio](https://developer.android.com/studio/).
+* An ARM-based Android device [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working microphone.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-java) for details on system requirements and setup.
## Build the sample
diff --git a/quickstart/java/android/intent-recognition/app/build.gradle b/quickstart/java/android/intent-recognition/app/build.gradle
index 21cd202a7..ab1f79c0a 100644
--- a/quickstart/java/android/intent-recognition/app/build.gradle
+++ b/quickstart/java/android/intent-recognition/app/build.gradle
@@ -25,7 +25,7 @@ dependencies {
implementation fileTree(include: ['*.jar'], dir: 'libs')
// Speech SDK
- implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.41.1'
+ implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.42.0'
implementation 'androidx.appcompat:appcompat:1.3.1'
implementation 'androidx.constraintlayout:constraintlayout:2.1.0'
diff --git a/quickstart/java/android/keyword-recognizer-stream/README.md b/quickstart/java/android/keyword-recognizer-stream/README.md
index 49043ad0c..ad255da15 100644
--- a/quickstart/java/android/keyword-recognizer-stream/README.md
+++ b/quickstart/java/android/keyword-recognizer-stream/README.md
@@ -6,9 +6,9 @@ See the [accompanying article](https://docs.microsoft.com/azure/cognitive-servic
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A PC (Windows, Linux, Mac) capable to run Android Studio.
-* [Android Studio](https://developer.android.com/studio/).
-* Android device or emulator (API 23: Android 6.0 Marshmallow or higher) [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working microphone.
+* A PC (Windows, Linux, Mac) capable of running [Android Studio](https://developer.android.com/studio/).
+* An ARM-based Android device [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working microphone.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-java) for details on system requirements and setup.
## Build the sample
diff --git a/quickstart/java/android/keyword-recognizer-stream/app/build.gradle b/quickstart/java/android/keyword-recognizer-stream/app/build.gradle
index 21cd202a7..ab1f79c0a 100644
--- a/quickstart/java/android/keyword-recognizer-stream/app/build.gradle
+++ b/quickstart/java/android/keyword-recognizer-stream/app/build.gradle
@@ -25,7 +25,7 @@ dependencies {
implementation fileTree(include: ['*.jar'], dir: 'libs')
// Speech SDK
- implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.41.1'
+ implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.42.0'
implementation 'androidx.appcompat:appcompat:1.3.1'
implementation 'androidx.constraintlayout:constraintlayout:2.1.0'
diff --git a/quickstart/java/android/keyword-recognizer/README.md b/quickstart/java/android/keyword-recognizer/README.md
index 98fc5c41a..0fa7ef30f 100644
--- a/quickstart/java/android/keyword-recognizer/README.md
+++ b/quickstart/java/android/keyword-recognizer/README.md
@@ -5,9 +5,9 @@ See the [accompanying article](https://docs.microsoft.com/azure/cognitive-servic
## Prerequisites
-* A PC (Windows, Linux, Mac) capable to run Android Studio.
-* [Android Studio](https://developer.android.com/studio/).
-* Android device or emulator (API 23: Android 6.0 Marshmallow or higher) [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working microphone.
+* A PC (Windows, Linux, Mac) capable of running [Android Studio](https://developer.android.com/studio/).
+* An ARM-based Android device [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working microphone.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-java) for details on system requirements and setup.
## Build the sample
diff --git a/quickstart/java/android/keyword-recognizer/app/build.gradle b/quickstart/java/android/keyword-recognizer/app/build.gradle
index 21cd202a7..ab1f79c0a 100644
--- a/quickstart/java/android/keyword-recognizer/app/build.gradle
+++ b/quickstart/java/android/keyword-recognizer/app/build.gradle
@@ -25,7 +25,7 @@ dependencies {
implementation fileTree(include: ['*.jar'], dir: 'libs')
// Speech SDK
- implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.41.1'
+ implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.42.0'
implementation 'androidx.appcompat:appcompat:1.3.1'
implementation 'androidx.constraintlayout:constraintlayout:2.1.0'
diff --git a/quickstart/java/android/text-to-speech/README.md b/quickstart/java/android/text-to-speech/README.md
index 76e2c73cf..f756c8072 100644
--- a/quickstart/java/android/text-to-speech/README.md
+++ b/quickstart/java/android/text-to-speech/README.md
@@ -6,9 +6,9 @@ See the [accompanying article](https://docs.microsoft.com/azure/cognitive-servic
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A PC (Windows, Linux, Mac) capable to run Android Studio.
-* [Android Studio](https://developer.android.com/studio/).
-* An ARM-based Android device (API 23: Android 6.0 Marshmallow or higher) [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working speaker, or an [Android emulator](https://developer.android.com/studio/run/emulator).
+* A PC (Windows, Linux, Mac) capable of running [Android Studio](https://developer.android.com/studio/).
+* An ARM-based Android device [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working speaker, or an [Android emulator](https://developer.android.com/studio/run/emulator).
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-java) for details on system requirements and setup.
## Build the sample
diff --git a/quickstart/java/android/text-to-speech/app/build.gradle b/quickstart/java/android/text-to-speech/app/build.gradle
index 21cd202a7..ab1f79c0a 100644
--- a/quickstart/java/android/text-to-speech/app/build.gradle
+++ b/quickstart/java/android/text-to-speech/app/build.gradle
@@ -25,7 +25,7 @@ dependencies {
implementation fileTree(include: ['*.jar'], dir: 'libs')
// Speech SDK
- implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.41.1'
+ implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.42.0'
implementation 'androidx.appcompat:appcompat:1.3.1'
implementation 'androidx.constraintlayout:constraintlayout:2.1.0'
diff --git a/quickstart/java/jre/conversation-transcription/README.md b/quickstart/java/jre/conversation-transcription/README.md
index 1dae15b98..4a9d54048 100644
--- a/quickstart/java/jre/conversation-transcription/README.md
+++ b/quickstart/java/jre/conversation-transcription/README.md
@@ -3,17 +3,12 @@
This sample demonstrates how to transcribe conversation with the Speech SDK for Java on Windows, macOS or Linux.
See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/how-to-use-conversation-transcription) on the SDK documentation page which describes details about conversation transcription service.
-> **Note:**
-> The Speech SDK for the JRE currently supports the Windows x64 platform, macOS x64 (10.14 or later), macOS M1 arm64 (11.0 or later), and [specific Linux distributions and target architectures](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-sdk?tabs=linux).
-
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
> Note: See the [limitations in supported regions](https://docs.microsoft.com/azure/cognitive-services/speech-service/how-to-use-conversation-transcription)
-* A PC (Windows x64 or a supported Linux distribution) or Mac (macOS 10.14 or later) capable to run Eclipse,[[1]](#footnote1) with a working microphone.
-* Java 11 JRE/JDK.
-* Version 4.8 of [Eclipse](https://www.eclipse.org), 64-bit.[[1]](#footnote1)
-* On Linux, see the [Linux platform requirements](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?tabs=linux&pivots=programming-language-java#platform-requirements) for installing the required dependencies.
+* A PC (Windows, Linux, Mac) capable of running [Eclipse](https://www.eclipse.org),[[1]](#footnote1) with a working microphone.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-java) for details on system requirements and setup.
1. This sample has not been verified with Eclipse on ARM platforms.
diff --git a/quickstart/java/jre/conversation-transcription/pom.xml b/quickstart/java/jre/conversation-transcription/pom.xml
index fbda3d0fa..63f89fc46 100644
--- a/quickstart/java/jre/conversation-transcription/pom.xml
+++ b/quickstart/java/jre/conversation-transcription/pom.xml
@@ -39,7 +39,7 @@
com.microsoft.cognitiveservices.speech
client-sdk
- 1.41.1
+ 1.42.0
com.google.code.gson
diff --git a/quickstart/java/jre/from-microphone/README.md b/quickstart/java/jre/from-microphone/README.md
index 4faffd896..f2a967570 100644
--- a/quickstart/java/jre/from-microphone/README.md
+++ b/quickstart/java/jre/from-microphone/README.md
@@ -3,16 +3,11 @@
This sample demonstrates how to recognize speech with the Speech SDK for Java on Windows, macOS or Linux.
See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-java-jre) on the SDK documentation page which describes how to build this sample from scratch in Eclipse.
-> **Note:**
-> The Speech SDK for the JRE currently supports the Windows x64 platform, macOS x64 (10.14 or later), macOS M1 arm64 (11.0 or later), and [specific Linux distributions and target architectures](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-sdk?tabs=linux).
-
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A PC (Windows x64 or a supported Linux distribution) or Mac (macOS 10.14 or later) capable to run Eclipse,[[1]](#footnote1) with a working microphone.
-* Java 8 or 11 JRE/JDK.
-* Version 4.8 of [Eclipse](https://www.eclipse.org), 64-bit.[[1]](#footnote1)
-* On Linux, see the [Linux platform requirements](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?tabs=linux&pivots=programming-language-java#platform-requirements) for installing the required dependencies.
+* A PC (Windows, Linux, Mac) capable of running [Eclipse](https://www.eclipse.org),[[1]](#footnote1) with a working microphone.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-java) for details on system requirements and setup.
1. This sample has not been verified with Eclipse on ARM platforms.
diff --git a/quickstart/java/jre/from-microphone/pom.xml b/quickstart/java/jre/from-microphone/pom.xml
index e4531d6e3..a76a3d088 100644
--- a/quickstart/java/jre/from-microphone/pom.xml
+++ b/quickstart/java/jre/from-microphone/pom.xml
@@ -62,7 +62,7 @@
com.microsoft.cognitiveservices.speech
client-sdk
- 1.41.1
+ 1.42.0
diff --git a/quickstart/java/jre/intent-recognition/README.md b/quickstart/java/jre/intent-recognition/README.md
index 53fa487d6..8a156c0fd 100644
--- a/quickstart/java/jre/intent-recognition/README.md
+++ b/quickstart/java/jre/intent-recognition/README.md
@@ -3,16 +3,11 @@
This sample demonstrates how to recognize speech with the Speech SDK for Java on Windows, macOS or Linux.
See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-java-jre) on the SDK documentation page which describes how to build this sample from scratch in Eclipse.
-> **Note:**
-> The Speech SDK for the JRE currently supports the Windows x64 platform, macOS x64 (10.14 or later), macOS M1 arm64 (11.0 or later), and [specific Linux distributions and target architectures](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-sdk?tabs=linux).
-
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A PC (Windows x64 or a supported Linux distribution) or Mac (macOS 10.14 or later) capable to run Eclipse,[[1]](#footnote1) with a working microphone.
-* Java 8 or 11 JRE/JDK.
-* Version 4.8 of [Eclipse](https://www.eclipse.org), 64-bit.[[1]](#footnote1)
-* On Linux, see the [Linux platform requirements](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?tabs=linux&pivots=programming-language-java#platform-requirements) for installing the required dependencies.
+* A PC (Windows, Linux, Mac) capable of running [Eclipse](https://www.eclipse.org),[[1]](#footnote1) with a working microphone.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-java) for details on system requirements and setup.
1. This sample has not been verified with Eclipse on ARM platforms.
diff --git a/quickstart/java/jre/intent-recognition/pom.xml b/quickstart/java/jre/intent-recognition/pom.xml
index b88c55533..d7ca9dc19 100644
--- a/quickstart/java/jre/intent-recognition/pom.xml
+++ b/quickstart/java/jre/intent-recognition/pom.xml
@@ -39,7 +39,7 @@
com.microsoft.cognitiveservices.speech
client-sdk
- 1.41.1
+ 1.42.0
diff --git a/quickstart/java/jre/meeting-transcription/README.md b/quickstart/java/jre/meeting-transcription/README.md
index f59b655ab..5aabe7944 100644
--- a/quickstart/java/jre/meeting-transcription/README.md
+++ b/quickstart/java/jre/meeting-transcription/README.md
@@ -3,17 +3,12 @@
This sample demonstrates how to enroll participant's voice signatures and transcribe meeting with 2 participants with the Speech SDK for Java on Windows, macOS or Linux.
See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/how-to-use-meeting-transcription) on the SDK documentation page which describes details about meeting transcription service.
-> **Note:**
-> The Speech SDK for the JRE currently supports the Windows x64 platform, macOS x64 (10.14 or later), macOS M1 arm64 (11.0 or later), and [specific Linux distributions and target architectures](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-sdk?tabs=linux).
-
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
> Note: See the [limitations in supported regions](https://docs.microsoft.com/azure/cognitive-services/speech-service/how-to-use-meeting-transcription)
-* A PC (Windows x64 or a supported Linux distribution) or Mac (macOS 10.14 or later) capable to run Eclipse,[[1]](#footnote1) with a working microphone.
-* Java 11 JRE/JDK.
-* Version 4.8 of [Eclipse](https://www.eclipse.org), 64-bit.[[1]](#footnote1)
-* On Linux, see the [Linux platform requirements](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?tabs=linux&pivots=programming-language-java#platform-requirements) for installing the required dependencies.
+* A PC (Windows, Linux, Mac) capable of running [Eclipse](https://www.eclipse.org),[[1]](#footnote1) with a working microphone.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-java) for details on system requirements and setup.
1. This sample has not been verified with Eclipse on ARM platforms.
diff --git a/quickstart/java/jre/meeting-transcription/pom.xml b/quickstart/java/jre/meeting-transcription/pom.xml
index 1ac6f2d92..7bedcfc45 100644
--- a/quickstart/java/jre/meeting-transcription/pom.xml
+++ b/quickstart/java/jre/meeting-transcription/pom.xml
@@ -39,7 +39,7 @@
com.microsoft.cognitiveservices.speech
client-sdk
- 1.41.1
+ 1.42.0
com.google.code.gson
diff --git a/quickstart/java/jre/speaker-recognition/README.md b/quickstart/java/jre/speaker-recognition/README.md
index 3c9ae1c65..28a529cee 100644
--- a/quickstart/java/jre/speaker-recognition/README.md
+++ b/quickstart/java/jre/speaker-recognition/README.md
@@ -3,18 +3,13 @@
This sample demonstrates how to perform speaker recognition with the Speech SDK for Java on Windows, macOS or Linux.
See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-java-jre) on the SDK documentation page which describes how to build this sample from scratch in Eclipse.
-> **Note:**
-> The Speech SDK for the JRE currently supports the Windows x64 platform, macOS x64 (10.14 or later), macOS M1 arm64 (11.0 or later), and [specific Linux distributions and target architectures](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-sdk?tabs=linux).
-
**Note:** Microsoft limits access to speaker recognition. You can apply for access through the [Azure Cognitive Services speaker recognition limited access review](https://aka.ms/azure-speaker-recognition). For more information, see [Limited access for speaker recognition](https://docs.microsoft.com/legal/cognitive-services/speech-service/speaker-recognition/limited-access-speaker-recognition).
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A PC (Windows x64 or a supported Linux distribution) or Mac (macOS 10.14 or later) capable to run Eclipse,[[1]](#footnote1) with a working microphone.
-* Java 8 or 11 JRE/JDK.
-* Version 4.8 of [Eclipse](https://www.eclipse.org), 64-bit.[[1]](#footnote1)
-* On Linux, see the [Linux platform requirements](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?tabs=linux&pivots=programming-language-java#platform-requirements) for installing the required dependencies.
+* A PC (Windows, Linux, Mac) capable of running [Eclipse](https://www.eclipse.org),[[1]](#footnote1) with a working microphone.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-java) for details on system requirements and setup.
1. This sample has not been verified with Eclipse on ARM platforms.
diff --git a/quickstart/java/jre/speaker-recognition/pom.xml b/quickstart/java/jre/speaker-recognition/pom.xml
index 072d736f9..e15d17d35 100644
--- a/quickstart/java/jre/speaker-recognition/pom.xml
+++ b/quickstart/java/jre/speaker-recognition/pom.xml
@@ -36,7 +36,7 @@
com.microsoft.cognitiveservices.speech
client-sdk
- 1.41.1
+ 1.42.0
diff --git a/quickstart/java/jre/text-to-speech/README.md b/quickstart/java/jre/text-to-speech/README.md
index e78aeb695..7e3e49b5b 100644
--- a/quickstart/java/jre/text-to-speech/README.md
+++ b/quickstart/java/jre/text-to-speech/README.md
@@ -3,16 +3,11 @@
This sample demonstrates how to synthesize speech with the Speech SDK for Java on Windows, macOS or Linux.
See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-text-to-speech-java-jre) on the SDK documentation page which describes how to build this sample from scratch in Eclipse.
-> **Note:**
-> The Speech SDK for the JRE currently supports the Windows x64 platform, macOS x64 (10.14 or later), macOS M1 arm64 (11.0 or later), and [specific Linux distributions and target architectures](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-sdk?tabs=linux).
-
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A PC (Windows x64 or a supported Linux distribution) or Mac (macOS 10.14 or later) capable to run Eclipse,[[1]](#footnote1) with a working speaker.
-* Java 8 or 11 JRE/JDK.
-* Version 4.8 of [Eclipse](https://www.eclipse.org), 64-bit.[[1]](#footnote1)
-* On Linux, see the [Linux platform requirements](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?tabs=linux&pivots=programming-language-java#platform-requirements) for installing the required dependencies.
+* A PC (Windows, Linux, Mac) capable of running [Eclipse](https://www.eclipse.org),[[1]](#footnote1) with a working speaker.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-java) for details on system requirements and setup.
1. This sample has not been verified with Eclipse on ARM platforms.
diff --git a/quickstart/java/jre/text-to-speech/pom.xml b/quickstart/java/jre/text-to-speech/pom.xml
index f35f343e2..24cb46e35 100644
--- a/quickstart/java/jre/text-to-speech/pom.xml
+++ b/quickstart/java/jre/text-to-speech/pom.xml
@@ -62,7 +62,7 @@
com.microsoft.cognitiveservices.speech
client-sdk
- 1.41.1
+ 1.42.0
diff --git a/quickstart/java/jre/translate-speech-to-text/README.md b/quickstart/java/jre/translate-speech-to-text/README.md
index 2ca4bd7df..194d20965 100644
--- a/quickstart/java/jre/translate-speech-to-text/README.md
+++ b/quickstart/java/jre/translate-speech-to-text/README.md
@@ -3,16 +3,11 @@
This sample demonstrates how to translate speech with the Speech SDK for Java on Windows, macOS or Linux.
See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started-speech-translation?tabs=script%2Cwindowsinstall&pivots=programming-language-java) on the SDK documentation page which describes how to build this sample from scratch in Eclipse.
-> **Note:**
-> The Speech SDK for the JRE currently supports the Windows x64 platform, macOS x64 (10.14 or later), macOS M1 arm64 (11.0 or later), and [specific Linux distributions and target architectures](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-sdk?tabs=linux).
-
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A PC (Windows x64 or a supported Linux distribution) or Mac (macOS 10.14 or later) capable to run Eclipse,[[1]](#footnote1) with a working microphone.
-* Java 8 or 11 JRE/JDK.
-* Version 4.8 of [Eclipse](https://www.eclipse.org), 64-bit.[[1]](#footnote1)
-* On Linux, see the [Linux platform requirements](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?tabs=linux&pivots=programming-language-java#platform-requirements) for installing the required dependencies.
+* A PC (Windows, Linux, Mac) capable of running [Eclipse](https://www.eclipse.org),[[1]](#footnote1) with a working microphone.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-java) for details on system requirements and setup.
1. This sample has not been verified with Eclipse on ARM platforms.
diff --git a/quickstart/java/jre/translate-speech-to-text/pom.xml b/quickstart/java/jre/translate-speech-to-text/pom.xml
index d608a501f..e45fca66e 100644
--- a/quickstart/java/jre/translate-speech-to-text/pom.xml
+++ b/quickstart/java/jre/translate-speech-to-text/pom.xml
@@ -20,7 +20,7 @@
com.microsoft.cognitiveservices.speech
client-sdk
- 1.41.1
+ 1.42.0
diff --git a/quickstart/java/jre/virtual-assistant/README.md b/quickstart/java/jre/virtual-assistant/README.md
index 53c34fc1f..fbe451e4e 100644
--- a/quickstart/java/jre/virtual-assistant/README.md
+++ b/quickstart/java/jre/virtual-assistant/README.md
@@ -3,17 +3,12 @@
This sample demonstrates how to recognize speech for custom voice assistants and receive activity responses.
See the [accompanying article](https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-voice-assistant-java-jre) on the SDK documentation page which describes how to build this sample from scratch in Eclipse.
-> **Note:**
-> The Speech SDK for the JRE currently supports only the Windows x64 platform, macOS x64 (10.14 or later), macOS M1 arm64 (11.0 or later), and [specific Linux distributions and target architectures](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-sdk?tabs=linux).
-
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A PC (Windows x64 or a supported Linux distribution) or Mac (macOS 10.14 or later) capable to run Eclipse,[[1]](#footnote1) with a working microphone.
-* Java 8 or 11 JRE/JDK.
-* Version 4.8 of [Eclipse](https://www.eclipse.org), 64-bit.[[1]](#footnote1)
+* A PC (Windows, Linux, Mac) capable of running [Eclipse](https://www.eclipse.org),[[1]](#footnote1) with a working microphone.
* A pre-configured bot created using Bot Framework version 4.2 or above. See [here for steps on how to create a bot](https://blog.botframework.com/2018/05/07/build-a-microsoft-bot-framework-bot-with-the-bot-builder-sdk-v4/). The bot would need to subscribe to the new "Direct Line Speech" channel to receive voice inputs.
-* On Linux, see the [Linux platform requirements](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?tabs=linux&pivots=programming-language-java#platform-requirements) for installing the required dependencies.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-java) for details on system requirements and setup.
1. This sample has not been verified with Eclipse on ARM platforms.
diff --git a/quickstart/java/jre/virtual-assistant/pom.xml b/quickstart/java/jre/virtual-assistant/pom.xml
index 982e3279e..6df921233 100644
--- a/quickstart/java/jre/virtual-assistant/pom.xml
+++ b/quickstart/java/jre/virtual-assistant/pom.xml
@@ -12,7 +12,7 @@
default
https://repo1.maven.org/maven2
- false
+ false
@@ -20,13 +20,13 @@
Microsoft Cognitive Services Local Maven Repository
file:///localplaceholder//maven//
-
+
com.microsoft.cognitiveservices.speech
client-sdk
- 1.41.1
+ 1.42.0
org.slf4j
@@ -81,6 +81,11 @@
1.8
+
+ org.apache.maven.plugins
+ maven-dependency-plugin
+ 3.1.2
+
-
+
\ No newline at end of file
diff --git a/quickstart/javascript/node/conversation-transcription/package.json b/quickstart/javascript/node/conversation-transcription/package.json
index 2bb4a57fc..b2b09ff1c 100644
--- a/quickstart/javascript/node/conversation-transcription/package.json
+++ b/quickstart/javascript/node/conversation-transcription/package.json
@@ -12,6 +12,6 @@
"license": "MIT",
"dependencies": {
"https-proxy-agent": "^3.0.0",
- "microsoft-cognitiveservices-speech-sdk": "^1.41.0"
+ "microsoft-cognitiveservices-speech-sdk": "^1.42.0"
}
}
diff --git a/quickstart/javascript/node/from-file/package.json b/quickstart/javascript/node/from-file/package.json
index 2bb4a57fc..b2b09ff1c 100644
--- a/quickstart/javascript/node/from-file/package.json
+++ b/quickstart/javascript/node/from-file/package.json
@@ -12,6 +12,6 @@
"license": "MIT",
"dependencies": {
"https-proxy-agent": "^3.0.0",
- "microsoft-cognitiveservices-speech-sdk": "^1.41.0"
+ "microsoft-cognitiveservices-speech-sdk": "^1.42.0"
}
}
diff --git a/quickstart/javascript/node/meeting-transcription/package.json b/quickstart/javascript/node/meeting-transcription/package.json
index 2bb4a57fc..b2b09ff1c 100644
--- a/quickstart/javascript/node/meeting-transcription/package.json
+++ b/quickstart/javascript/node/meeting-transcription/package.json
@@ -12,6 +12,6 @@
"license": "MIT",
"dependencies": {
"https-proxy-agent": "^3.0.0",
- "microsoft-cognitiveservices-speech-sdk": "^1.41.0"
+ "microsoft-cognitiveservices-speech-sdk": "^1.42.0"
}
}
diff --git a/quickstart/javascript/node/speaker-recognition/identification/package.json b/quickstart/javascript/node/speaker-recognition/identification/package.json
index 3e6fc03b2..3d6ffbd40 100644
--- a/quickstart/javascript/node/speaker-recognition/identification/package.json
+++ b/quickstart/javascript/node/speaker-recognition/identification/package.json
@@ -12,6 +12,6 @@
"license": "MIT",
"dependencies": {
"https-proxy-agent": "^3.0.0",
- "microsoft-cognitiveservices-speech-sdk": "^1.41.0"
+ "microsoft-cognitiveservices-speech-sdk": "^1.42.0"
}
}
diff --git a/quickstart/javascript/node/speaker-recognition/verification/package.json b/quickstart/javascript/node/speaker-recognition/verification/package.json
index 7c55381fe..b7d6690d8 100644
--- a/quickstart/javascript/node/speaker-recognition/verification/package.json
+++ b/quickstart/javascript/node/speaker-recognition/verification/package.json
@@ -12,6 +12,6 @@
"license": "MIT",
"dependencies": {
"https-proxy-agent": "^3.0.0",
- "microsoft-cognitiveservices-speech-sdk": "^1.41.0"
+ "microsoft-cognitiveservices-speech-sdk": "^1.42.0"
}
}
diff --git a/quickstart/javascript/node/text-to-speech/package.json b/quickstart/javascript/node/text-to-speech/package.json
index 3754c015f..685c1e755 100644
--- a/quickstart/javascript/node/text-to-speech/package.json
+++ b/quickstart/javascript/node/text-to-speech/package.json
@@ -12,7 +12,7 @@
"license": "MIT",
"dependencies": {
"https-proxy-agent": "^3.0.0",
- "microsoft-cognitiveservices-speech-sdk": "^1.41.0",
+ "microsoft-cognitiveservices-speech-sdk": "^1.42.0",
"readline": "^1.3.0"
}
}
diff --git a/quickstart/objectivec/ios/from-microphone/README.md b/quickstart/objectivec/ios/from-microphone/README.md
index cafeb3832..736a26e39 100644
--- a/quickstart/objectivec/ios/from-microphone/README.md
+++ b/quickstart/objectivec/ios/from-microphone/README.md
@@ -6,7 +6,7 @@ See the [accompanying article](https://docs.microsoft.com/azure/cognitive-servic
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Mac with [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) version 9.4.1 or later installed as iOS development environment. This tutorial targets iOS versions 9.2 or later.
+* A Mac with [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) installed as iOS development environment. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-objectivec) for details on system requirements and setup.
## Get the Code for the Sample App
diff --git a/quickstart/objectivec/ios/text-to-speech/README.md b/quickstart/objectivec/ios/text-to-speech/README.md
index 9a3170e5f..2fba4f73d 100644
--- a/quickstart/objectivec/ios/text-to-speech/README.md
+++ b/quickstart/objectivec/ios/text-to-speech/README.md
@@ -5,7 +5,7 @@ This sample demonstrates how to create a iOS app in Objective-C using the Cognit
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Mac with [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) installed as iOS development environment. This tutorial targets iOS versions 11.0 or later.
+* A Mac with [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) installed as iOS development environment. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-objectivec) for details on system requirements and setup.
## Get the Code for the Sample App
diff --git a/quickstart/objectivec/macos/from-microphone/README.md b/quickstart/objectivec/macos/from-microphone/README.md
index 43e7d750f..22039a193 100644
--- a/quickstart/objectivec/macos/from-microphone/README.md
+++ b/quickstart/objectivec/macos/from-microphone/README.md
@@ -5,7 +5,7 @@ This sample demonstrates how to create a macOS app in Objective-C using the Cogn
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A macOS machine with [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) version 9.4.1 or later and macOS 10.14 or later
+* A macOS machine with [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-objectivec) for details on system requirements and setup.
## Get the Code for the Sample App
diff --git a/quickstart/objectivec/macos/from-microphone/helloworld/Podfile b/quickstart/objectivec/macos/from-microphone/helloworld/Podfile
index 6a8080e85..16d04b0a5 100644
--- a/quickstart/objectivec/macos/from-microphone/helloworld/Podfile
+++ b/quickstart/objectivec/macos/from-microphone/helloworld/Podfile
@@ -1,4 +1,4 @@
target 'helloworld' do
platform :osx, '10.13'
- pod 'MicrosoftCognitiveServicesSpeech-macOS', '~> 1.41.1'
+ pod 'MicrosoftCognitiveServicesSpeech-macOS', '~> 1.42.0'
end
diff --git a/quickstart/objectivec/macos/text-to-speech/README.md b/quickstart/objectivec/macos/text-to-speech/README.md
index 915fcb21e..6a9883d5d 100644
--- a/quickstart/objectivec/macos/text-to-speech/README.md
+++ b/quickstart/objectivec/macos/text-to-speech/README.md
@@ -5,7 +5,7 @@ This sample demonstrates how to create a macOS app in Objective-C using the Cogn
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A macOS machine with [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) version 9.4.1 or later and macOS 10.14 or later.
+* A macOS machine with [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-objectivec) for details on system requirements and setup.
## Get the Code for the Sample App
diff --git a/quickstart/objectivec/macos/text-to-speech/helloworld/Podfile b/quickstart/objectivec/macos/text-to-speech/helloworld/Podfile
index 6a8080e85..16d04b0a5 100644
--- a/quickstart/objectivec/macos/text-to-speech/helloworld/Podfile
+++ b/quickstart/objectivec/macos/text-to-speech/helloworld/Podfile
@@ -1,4 +1,4 @@
target 'helloworld' do
platform :osx, '10.13'
- pod 'MicrosoftCognitiveServicesSpeech-macOS', '~> 1.41.1'
+ pod 'MicrosoftCognitiveServicesSpeech-macOS', '~> 1.42.0'
end
diff --git a/quickstart/python/from-microphone/README.md b/quickstart/python/from-microphone/README.md
index a373d18aa..db27f6b10 100644
--- a/quickstart/python/from-microphone/README.md
+++ b/quickstart/python/from-microphone/README.md
@@ -9,10 +9,7 @@ See the [accompanying article](https://docs.microsoft.com/azure/cognitive-servic
Before you get started, here's a list of prerequisites:
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* On Windows and Linux Python 3.6 or later needs to be installed. On Mac, minimum version for Python is 3.7. Downloads are available [here](https://www.python.org/downloads/).
-* The Python Speech SDK package is available for Windows (x64 and x86), Mac x64 (macOS X version 10.14 or later), Mac arm64 (macOS version 11.0 or later), and [specific Linux distributions and target architectures](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-sdk?tabs=linux).
-* On Linux, see the [Linux platform requirements](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?tabs=linux&pivots=programming-language-python#platform-requirements) for installing the required dependencies.
-* On Windows you need the [Microsoft Visual C++ Redistributable for Visual Studio 2017](https://support.microsoft.com/help/2977003/the-latest-supported-visual-c-downloads) for your platform.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-python) for details on system requirements and setup.
## Get the Speech SDK Python Package
diff --git a/quickstart/python/intent-recognition/README.md b/quickstart/python/intent-recognition/README.md
index 46886003c..db5d3879e 100644
--- a/quickstart/python/intent-recognition/README.md
+++ b/quickstart/python/intent-recognition/README.md
@@ -9,10 +9,7 @@ See the [accompanying article](https://docs.microsoft.com/azure/cognitive-servic
Before you get started, here's a list of prerequisites:
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* On Windows and Linux Python 3.6 or later needs to be installed. On Mac, minimum version for Python is 3.7. Downloads are available [here](https://www.python.org/downloads/).
-* The Python Speech SDK package is available for Windows (x64 and x86), Mac x64 (macOS X version 10.14 or later), Mac arm64 (macOS version 11.0 or later), and [specific Linux distributions and target architectures](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-sdk?tabs=linux).
-* On Linux, see the [Linux platform requirements](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?tabs=linux&pivots=programming-language-python#platform-requirements) for installing the required dependencies.
-* On Windows you need the [Microsoft Visual C++ Redistributable for Visual Studio 2017](https://support.microsoft.com/help/2977003/the-latest-supported-visual-c-downloads) for your platform.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-python) for details on system requirements and setup.
## Get the Speech SDK Python Package
diff --git a/quickstart/python/text-to-speech/README.md b/quickstart/python/text-to-speech/README.md
index 4c5ea6b21..1482404f8 100644
--- a/quickstart/python/text-to-speech/README.md
+++ b/quickstart/python/text-to-speech/README.md
@@ -9,10 +9,7 @@ See the [accompanying article](https://docs.microsoft.com/azure/cognitive-servic
Before you get started, here's a list of prerequisites:
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* On Windows and Linux Python 3.6 or later needs to be installed. On Mac, minimum version for Python is 3.7. Downloads are available [here](https://www.python.org/downloads/).
-* The Python Speech SDK package is available for Windows (x64 and x86), Mac x64 (macOS X version 10.14 or later), Mac arm64 (macOS version 11.0 or later), and [specific Linux distributions and target architectures](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-sdk?tabs=linux).
-* On Linux, see the [Linux platform requirements](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?tabs=linux&pivots=programming-language-python#platform-requirements) for installing the required dependencies.
-* On Windows you need the [Microsoft Visual C++ Redistributable for Visual Studio 2017](https://support.microsoft.com/help/2977003/the-latest-supported-visual-c-downloads) for your platform.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-python) for details on system requirements and setup.
## Get the Speech SDK Python Package
diff --git a/quickstart/swift/ios/from-microphone/README.md b/quickstart/swift/ios/from-microphone/README.md
index 49b2985b6..82fb9824d 100644
--- a/quickstart/swift/ios/from-microphone/README.md
+++ b/quickstart/swift/ios/from-microphone/README.md
@@ -5,7 +5,7 @@ This sample demonstrates how to create an iOS app in Swift using the Cognitive S
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A macOS machine with a microphone and [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) version 9.4.1 or later and [CocoaPods](https://cocoapods.org/) installed.
+* A macOS machine with a microphone and [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-swift) for details on system requirements and setup.
## Get the code for the sample app
diff --git a/quickstart/swift/ios/text-to-speech/README.md b/quickstart/swift/ios/text-to-speech/README.md
index bf2cf1eff..0d02b7b5c 100644
--- a/quickstart/swift/ios/text-to-speech/README.md
+++ b/quickstart/swift/ios/text-to-speech/README.md
@@ -5,7 +5,7 @@ This sample demonstrates how to create an iOS app in Swift using the Cognitive S
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A macOS machine with [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) version 9.4.1 or later and [CocoaPods](https://cocoapods.org/) installed.
+* A macOS machine with [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-swift) for details on system requirements and setup.
## Get the code for the sample app
diff --git a/quickstart/swift/macos/from-microphone/README.md b/quickstart/swift/macos/from-microphone/README.md
index dd88a8dbb..5568a1e11 100644
--- a/quickstart/swift/macos/from-microphone/README.md
+++ b/quickstart/swift/macos/from-microphone/README.md
@@ -5,7 +5,7 @@ This sample demonstrates how to create an macOS app in Swift using the Cognitive
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A macOS machine with a microphone and [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) version 9.4.1 or later and [CocoaPods](https://cocoapods.org/) installed.
+* A macOS machine with a microphone and [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-swift) for details on system requirements and setup.
## Get the code for the sample app
diff --git a/quickstart/swift/macos/text-to-speech/README.md b/quickstart/swift/macos/text-to-speech/README.md
index 0503afbb2..ae1ef6efa 100644
--- a/quickstart/swift/macos/text-to-speech/README.md
+++ b/quickstart/swift/macos/text-to-speech/README.md
@@ -5,7 +5,7 @@ This sample demonstrates how to create an macOS app in Swift using the Cognitive
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A macOS machine with [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) version 9.4.1 or later and [CocoaPods](https://cocoapods.org/) installed.
+* A macOS machine with [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-swift) for details on system requirements and setup.
## Get the code for the sample app
diff --git a/samples/batch-synthesis/csharp/BatchSynthesisSample/BatchSynthesisSample.csproj b/samples/batch-synthesis/csharp/BatchSynthesisSample/BatchSynthesisSample.csproj
index 36b0e391f..72d3c4cf7 100644
--- a/samples/batch-synthesis/csharp/BatchSynthesisSample/BatchSynthesisSample.csproj
+++ b/samples/batch-synthesis/csharp/BatchSynthesisSample/BatchSynthesisSample.csproj
@@ -1,7 +1,7 @@
Exe
- net6.0
+ net8.0
enable
diff --git a/samples/batch-synthesis/java/pom.xml b/samples/batch-synthesis/java/pom.xml
index 5d344d757..a21c76944 100644
--- a/samples/batch-synthesis/java/pom.xml
+++ b/samples/batch-synthesis/java/pom.xml
@@ -17,13 +17,7 @@
com.fasterxml.jackson.datatype
jackson-datatype-jsr310
- 2.9.8
-
-
-
- org.apache.httpcomponents
- httpcomponents-core
- 4.4.15
+ 2.10.0
org.apache.httpcomponents
diff --git a/samples/cpp/embedded-speech/samples/packages.config b/samples/cpp/embedded-speech/samples/packages.config
index ca927e1d9..5bcbfd002 100644
--- a/samples/cpp/embedded-speech/samples/packages.config
+++ b/samples/cpp/embedded-speech/samples/packages.config
@@ -1,9 +1,9 @@
-
-
-
-
-
+
+
+
+
+
\ No newline at end of file
diff --git a/samples/cpp/embedded-speech/samples/samples.vcxproj b/samples/cpp/embedded-speech/samples/samples.vcxproj
index 559d8dc79..c79126a41 100644
--- a/samples/cpp/embedded-speech/samples/samples.vcxproj
+++ b/samples/cpp/embedded-speech/samples/samples.vcxproj
@@ -77,11 +77,11 @@
-
-
-
-
-
+
+
+
+
+
@@ -241,11 +241,11 @@
This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
-
-
-
-
-
+
+
+
+
+
\ No newline at end of file
diff --git a/samples/cpp/windows/console/README.md b/samples/cpp/windows/console/README.md
index ae63c27f9..bbfbee974 100644
--- a/samples/cpp/windows/console/README.md
+++ b/samples/cpp/windows/console/README.md
@@ -11,16 +11,13 @@ This sample demonstrates various forms of speech recognition, intent recognition
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC; some sample scenarios require a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **Desktop development with C++** workload in Visual Studio and the **NuGet package manager** component in Visual Studio.
- You can enable both in **Tools** \> **Get Tools and Features**, under the **Workloads** and **Individual components** tabs, respectively.
+* A Windows PC with [Microsoft Visual Studio](https://www.visualstudio.com/) installed; some sample scenarios require a working microphone. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-cpp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* To tailor the sample to your configuration, use search and replace across the whole solution (for example, via **Edit** \> **Find and Replace** \> **Quick Replace**) to update the following strings:
diff --git a/samples/cpp/windows/console/samples/packages.config b/samples/cpp/windows/console/samples/packages.config
index 52ced3a4e..024998aa7 100644
--- a/samples/cpp/windows/console/samples/packages.config
+++ b/samples/cpp/windows/console/samples/packages.config
@@ -1,6 +1,6 @@
-
-
+
+
\ No newline at end of file
diff --git a/samples/cpp/windows/console/samples/samples.vcxproj b/samples/cpp/windows/console/samples/samples.vcxproj
index c34599f17..90b718672 100644
--- a/samples/cpp/windows/console/samples/samples.vcxproj
+++ b/samples/cpp/windows/console/samples/samples.vcxproj
@@ -56,8 +56,8 @@
-
-
+
+
@@ -215,8 +215,8 @@
This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
-
-
+
+
\ No newline at end of file
diff --git a/samples/csharp/dotnet-windows/console/README.md b/samples/csharp/dotnet-windows/console/README.md
index 675c07a24..3c22b6902 100644
--- a/samples/csharp/dotnet-windows/console/README.md
+++ b/samples/csharp/dotnet-windows/console/README.md
@@ -6,10 +6,7 @@ It runs under the .NET Framework 4.6.1 (or above) on Windows.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC; some sample scenarios require a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **.NET desktop development** workload in Visual Studio.
- You can enable it in **Tools** \> **Get Tools and Features**.
+* A Windows PC with [Microsoft Visual Studio](https://www.visualstudio.com/) installed; some sample scenarios require a working microphone. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
@@ -19,7 +16,7 @@ It runs under the .NET Framework 4.6.1 (or above) on Windows.
> **Note:** make sure to download the complete set of samples, not just this directory.
> This sample references code in the directory [../../sharedcontent/console](../../sharedcontent/console), which is shared across samples.
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* To tailor the sample to your configuration, use search and replace across the whole solution (for example, via **Edit** \> **Find and Replace** \> **Quick Replace**) to update the following strings:
diff --git a/samples/csharp/dotnet-windows/console/samples/packages.config b/samples/csharp/dotnet-windows/console/samples/packages.config
index 22e2761c2..ab28850a1 100644
--- a/samples/csharp/dotnet-windows/console/samples/packages.config
+++ b/samples/csharp/dotnet-windows/console/samples/packages.config
@@ -1,5 +1,5 @@
-
-
+
+
\ No newline at end of file
diff --git a/samples/csharp/dotnet-windows/console/samples/samples.csproj b/samples/csharp/dotnet-windows/console/samples/samples.csproj
index 3a927ab6e..0e4a0ca26 100644
--- a/samples/csharp/dotnet-windows/console/samples/samples.csproj
+++ b/samples/csharp/dotnet-windows/console/samples/samples.csproj
@@ -153,9 +153,9 @@
1.7.1
-
-
-
+
+
+
\ No newline at end of file
diff --git a/samples/csharp/dotnet-windows/speechtotext-naudio/README.md b/samples/csharp/dotnet-windows/speechtotext-naudio/README.md
index 891da8d20..43c0aab13 100644
--- a/samples/csharp/dotnet-windows/speechtotext-naudio/README.md
+++ b/samples/csharp/dotnet-windows/speechtotext-naudio/README.md
@@ -8,15 +8,12 @@ The sample runs on .NET Framework 4.7.2 (or above) on Windows.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC with a working microphone.
-* [Microsoft Visual Studio 2019](https://www.visualstudio.com/), Community Edition or higher.
-* The **.NET desktop development** workload in Visual Studio.
- You can enable it in **Tools** \> **Get Tools and Features**.
+* A Windows PC with a working microphone and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
-* Start Microsoft Visual Studio 2019 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Press Ctrl+Shift+B, or select **Build** \> **Build Solution**.
diff --git a/samples/csharp/dotnet-windows/speechtotext-naudio/speechtotext-naudio/packages.config b/samples/csharp/dotnet-windows/speechtotext-naudio/speechtotext-naudio/packages.config
index 0cebe8324..3ba83fed2 100644
--- a/samples/csharp/dotnet-windows/speechtotext-naudio/speechtotext-naudio/packages.config
+++ b/samples/csharp/dotnet-windows/speechtotext-naudio/speechtotext-naudio/packages.config
@@ -1,6 +1,6 @@
-
+
diff --git a/samples/csharp/dotnet-windows/speechtotext-naudio/speechtotext-naudio/speechtotext-naudio.csproj b/samples/csharp/dotnet-windows/speechtotext-naudio/speechtotext-naudio/speechtotext-naudio.csproj
index a893679d1..7342fad18 100644
--- a/samples/csharp/dotnet-windows/speechtotext-naudio/speechtotext-naudio/speechtotext-naudio.csproj
+++ b/samples/csharp/dotnet-windows/speechtotext-naudio/speechtotext-naudio/speechtotext-naudio.csproj
@@ -55,8 +55,8 @@
true
-
- ..\packages\Microsoft.CognitiveServices.Speech.1.41.1\lib\net462\Microsoft.CognitiveServices.Speech.csharp.dll
+
+ ..\packages\Microsoft.CognitiveServices.Speech.1.42.0\lib\net462\Microsoft.CognitiveServices.Speech.csharp.dll
..\packages\Microsoft.Win32.Registry.4.7.0\lib\net461\Microsoft.Win32.Registry.dll
@@ -106,11 +106,11 @@
-
+
This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
-
+
\ No newline at end of file
diff --git a/samples/csharp/dotnet-windows/speechtotext-wpf/README.md b/samples/csharp/dotnet-windows/speechtotext-wpf/README.md
index cd5d478d8..706b44ffb 100644
--- a/samples/csharp/dotnet-windows/speechtotext-wpf/README.md
+++ b/samples/csharp/dotnet-windows/speechtotext-wpf/README.md
@@ -9,16 +9,13 @@ The sample runs on .NET Framework 4.6.1 (or above) on Windows.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC; some sample scenarios require a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **.NET desktop development** workload in Visual Studio.
- You can enable it in **Tools** \> **Get Tools and Features**.
+* A Windows PC with [Microsoft Visual Studio](https://www.visualstudio.com/) installed; some sample scenarios require a working microphone. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Press Ctrl+Shift+B, or select **Build** \> **Build Solution**.
diff --git a/samples/csharp/dotnet-windows/translation-wpf/README.md b/samples/csharp/dotnet-windows/translation-wpf/README.md
index 12e9a0b68..7974b8cc3 100644
--- a/samples/csharp/dotnet-windows/translation-wpf/README.md
+++ b/samples/csharp/dotnet-windows/translation-wpf/README.md
@@ -8,16 +8,13 @@ The sample runs on .NET Framework 4.6.1 (or above) on Windows.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC with a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **.NET desktop development** workload in Visual Studio.
- You can enable it in **Tools** \> **Get Tools and Features**.
+* A Windows PC with a working microphone and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Press Ctrl+Shift+B, or select **Build** \> **Build Solution**.
diff --git a/samples/csharp/dotnetcore/console/README.md b/samples/csharp/dotnetcore/console/README.md
index bf7c71aaa..153fa3d9a 100644
--- a/samples/csharp/dotnetcore/console/README.md
+++ b/samples/csharp/dotnetcore/console/README.md
@@ -1,21 +1,12 @@
# C# Console app for .NET Core (Windows or Linux)
This sample demonstrates various forms of speech recognition, intent recognition, and translation using the Speech SDK for C#.
-It runs under .NET 6.0 or later on Windows or Linux (see the list of [supported Linux distributions and target architectures](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-sdk?tabs=linux)).
+It runs under .NET 8.0 or later on Windows or Linux (see the list of [supported Linux distributions and target architectures](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-sdk?tabs=linux)).
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A PC (Windows x64 or a supported Linux distribution); some sample scenarios require a working microphone.
-* Either one of the following:
- * On Windows:
- * [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
- * The **.NET Core cross-platform development** workload in Visual Studio.
- You can enable it in **Tools** \> **Get Tools and Features**.
- * On Windows or Linux:
- * [.NET 6.0](https://dotnet.microsoft.com/download/dotnet/6.0)
- * On Windows you also need the [Microsoft Visual C++ Redistributable for Visual Studio 2015, 2017 and 2019](https://support.microsoft.com/help/2977003/the-latest-supported-visual-c-downloads) for your platform.
-* On Linux, see the [Linux platform requirements](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?tabs=linux&pivots=programming-language-csharp#platform-requirements) for installing the required dependencies.
+* A PC (Windows, Linux, Mac); some sample scenarios require a working microphone. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
@@ -25,8 +16,8 @@ It runs under .NET 6.0 or later on Windows or Linux (see the list of [supported
> **Note:** make sure to download the complete set of samples, not just this directory.
> This sample references code in the directory [../../sharedcontent/console](../../sharedcontent/console), which is shared across samples.
-* If you are using Microsoft Visual Studio 2017 on Windows:
- * Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* If you are using Microsoft Visual Studio on Windows:
+ * Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* To tailor the sample to your configuration, use search and replace across the whole solution (for example, in Visual Studio, via **Edit** \> **Find and Replace** \> **Quick Replace**) to update the following strings:
@@ -42,7 +33,7 @@ It runs under .NET 6.0 or later on Windows or Linux (see the list of [supported
* The following settings apply to keyword-triggered recognition:
* `YourKeywordRecognitionModelFile.table`: replace with the location of your keyword recognition model file.
* `YourKeyword`: replace with the phrase your keyword recognition model triggers on.
-* If you are using Microsoft Visual Studio 2017 on Windows, press Ctrl+Shift+B, or select **Build** \> **Build Solution**.
+* If you are using Microsoft Visual Studio on Windows, press Ctrl+Shift+B, or select **Build** \> **Build Solution**.
* If you are using the .NET Core CLI, run the following command from the directory that contains this sample:
```bash
@@ -51,7 +42,7 @@ It runs under .NET 6.0 or later on Windows or Linux (see the list of [supported
## Run the sample
-### Using Visual Studio 2017
+### Using Visual Studio
To debug the app and then run it, press F5 or use **Debug** \> **Start Debugging**. To run the app without debugging, press Ctrl+F5 or use **Debug** \> **Start Without Debugging**.
@@ -64,7 +55,7 @@ Run the following command below from the directory that contains this sample.
(We assume you performed a Debug build earlier)
```bash
-cd samples/bin/Debug/net6.0
+cd samples/bin/Debug/net8.0
dotnet samples.dll
```
diff --git a/samples/csharp/dotnetcore/console/samples/samples.csproj b/samples/csharp/dotnetcore/console/samples/samples.csproj
index 612cf724f..e0fa705e7 100644
--- a/samples/csharp/dotnetcore/console/samples/samples.csproj
+++ b/samples/csharp/dotnetcore/console/samples/samples.csproj
@@ -2,7 +2,7 @@
Exe
- net6.0
+ net8.0
@@ -53,9 +53,9 @@
-
-
-
+
+
+
diff --git a/samples/csharp/dotnetcore/embedded-speech/README.md b/samples/csharp/dotnetcore/embedded-speech/README.md
index c3d275e8e..06a978b93 100644
--- a/samples/csharp/dotnetcore/embedded-speech/README.md
+++ b/samples/csharp/dotnetcore/embedded-speech/README.md
@@ -21,9 +21,9 @@ Requirements specific to embedded speech samples are as follows.
* Linux - `x64`, `ARM64`.
* macOS - `x64`, `ARM64`.
* If using Visual Studio (Windows, macOS):
- * [Microsoft Visual Studio 2022 or newer](https://www.visualstudio.com/) with **.NET 6.0**.
+ * [Microsoft Visual Studio 2022 or newer](https://www.visualstudio.com/) with **.NET 8.0**.
* If using the command line:
- * [.NET 6.0](https://learn.microsoft.com/dotnet/core/install/) (dotnet)
+ * [.NET 8.0](https://learn.microsoft.com/dotnet/core/install/) (dotnet)
* For speech recognition or translation:
* A working microphone device (and permission to access it for audio capture).
* One or more embedded speech recognition or translation models. See https://aka.ms/embedded-speech for the latest information on how to obtain embedded models.
@@ -144,11 +144,11 @@ Choose the scenarios that you are interested in.
Navigate to the folder containing this sample and run
```sh
-dotnet samples/bin/Debug/net6.0/samples.dll
+dotnet samples/bin/Debug/net8.0/samples.dll
```
or
```sh
-dotnet samples/bin/Release/net6.0/samples.dll
+dotnet samples/bin/Release/net8.0/samples.dll
```
according to the build configuration that was used.
diff --git a/samples/csharp/dotnetcore/embedded-speech/samples/samples.csproj b/samples/csharp/dotnetcore/embedded-speech/samples/samples.csproj
index 67e737adb..d144d392b 100644
--- a/samples/csharp/dotnetcore/embedded-speech/samples/samples.csproj
+++ b/samples/csharp/dotnetcore/embedded-speech/samples/samples.csproj
@@ -2,7 +2,7 @@
Exe
- net6.0
+ net8.0
true
$(TargetDir)
AnyCPU;x64
@@ -24,11 +24,11 @@
-
-
-
-
-
+
+
+
+
+
diff --git a/samples/csharp/maui/embedded-speech/README.md b/samples/csharp/maui/embedded-speech/README.md
index 68e7056fd..751f6611d 100644
--- a/samples/csharp/maui/embedded-speech/README.md
+++ b/samples/csharp/maui/embedded-speech/README.md
@@ -14,13 +14,13 @@ It is recommended to try them out before this MAUI specific sample.
## Prerequisites
-* [Supported platforms for MAUI application development](https://learn.microsoft.com/dotnet/maui/supported-platforms?view=net-maui-7.0).
+* [Supported platforms for MAUI application development](https://learn.microsoft.com/dotnet/maui/supported-platforms).
Embedded speech with MAUI currently supports only the following targets:
* Windows 10 or higher on x64 or ARM64 hardware.
* Android ARM-based devices with **Android 7.0 (API level 24)** or higher.
Embedded speech synthesis with neural voices is only supported on ARM64.
-* [Microsoft Visual Studio 2022](https://www.visualstudio.com/) with .NET MAUI workload and [.NET 7.0 or later](https://learn.microsoft.com/dotnet/core/install/windows?tabs=net70).
- See [prerequisites for MAUI](https://learn.microsoft.com/dotnet/maui/get-started/installation?view=net-maui-7.0&tabs=vswin).
+* [Microsoft Visual Studio 2022](https://www.visualstudio.com/) with .NET MAUI workload and [.NET 8.0 or later](https://learn.microsoft.com/dotnet/core/install/windows).
+ See [prerequisites for MAUI](https://learn.microsoft.com/dotnet/maui/get-started/installation).
* One or more embedded speech models. See https://aka.ms/embedded-speech for the latest information on how to obtain models.
## Prepare the sample
diff --git a/samples/csharp/maui/embedded-speech/embedded-speech/embedded-speech.csproj b/samples/csharp/maui/embedded-speech/embedded-speech/embedded-speech.csproj
index 3dbd15140..e156ec5d3 100644
--- a/samples/csharp/maui/embedded-speech/embedded-speech/embedded-speech.csproj
+++ b/samples/csharp/maui/embedded-speech/embedded-speech/embedded-speech.csproj
@@ -1,11 +1,11 @@
- net7.0-android
-
- $(TargetFrameworks);net7.0-windows10.0.19041.0
+ net8.0-android
+
+ $(TargetFrameworks);net8.0-windows10.0.19041.0
-
+
None
Exe
EmbeddedSpeechSample
@@ -32,7 +32,7 @@
-
+
manual
@@ -55,11 +55,12 @@
-
-
-
-
-
+
+
+
+
+
+
diff --git a/samples/csharp/maui/speech-to-text/README.md b/samples/csharp/maui/speech-to-text/README.md
index b8f4a9763..b736d28fc 100644
--- a/samples/csharp/maui/speech-to-text/README.md
+++ b/samples/csharp/maui/speech-to-text/README.md
@@ -8,10 +8,10 @@ This Speech SDK sample demonstrates how to use speech to text functionalities wi
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* [Supported platforms for MAUI application development](https://learn.microsoft.com/dotnet/maui/supported-platforms?view=net-maui-7.0)
+* [Supported platforms for MAUI application development](https://learn.microsoft.com/dotnet/maui/supported-platforms)
* [Microsoft Visual Studio 2022](https://www.visualstudio.com/), Community Edition or higher is preferred.
-* [.NET 7.0 or later](https://learn.microsoft.com/dotnet/core/install/windows?tabs=net70).
-* [Prerequisites for MAUI](https://learn.microsoft.com/dotnet/maui/get-started/installation?view=net-maui-7.0&tabs=vswin)
+* [.NET 8.0 or later](https://learn.microsoft.com/dotnet/core/install/windows).
+* [Prerequisites for MAUI](https://learn.microsoft.com/dotnet/maui/get-started/installation)
## Prepare the sample
@@ -42,7 +42,7 @@ This Speech SDK sample demonstrates how to use speech to text functionalities wi
is that speech will be transcribed to text in the application screen.
## Build and run the sample for iOS
-* Set the target machine to iOS device or emulator of your choice. Note that MAUI will need access to MAC device to use required Xcode tools before you can run the sample using iOS machine target. See the [Pair to Mac for iOS development](https://learn.microsoft.com/dotnet/maui/ios/pair-to-mac?view=net-maui-7.0)
+* Set the target machine to iOS device or emulator of your choice. Note that MAUI will need access to MAC device to use required Xcode tools before you can run the sample using iOS machine target. See the [Pair to Mac for iOS development](https://learn.microsoft.com/dotnet/maui/ios/pair-to-mac)
* Press Ctrl+Shift+B, or select **Build** \> **Build Solution**.
* To run the sample app, press F5 or click the selected iOS device or emulator, see **iOS simulators** or **iOS Remote or Local devices** in target device drop down selector.
* The application will request the microphone access which you need to allow in order to use the speech to text functionality using microphone as input.
@@ -50,7 +50,7 @@ This Speech SDK sample demonstrates how to use speech to text functionalities wi
is that speech will be transcribed to text in the application screen.
## Build and run the sample for Maccatalyst
-* Read the instructions here on how to build MAUI application on Mac [Build a Mac Catalyst app with .NET CLI](https://learn.microsoft.com/dotnet/maui/macos/cli?view=net-maui-7.0)
+* Read the instructions here on how to build MAUI application on Mac [Build a Mac Catalyst app with .NET CLI](https://learn.microsoft.com/dotnet/maui/mac-catalyst/cli)
* In order to run the sample on Mac, you need to ensure your Mac has been installed with required workloads for Android and Mac.
dotnet command will prompt with error and information if some of those workloads are missing.
* The application will request the microphone access which you need to allow in order to use the speech to text functionality using microphone as input.
diff --git a/samples/csharp/maui/speech-to-text/speech-to-text/speech-to-text.csproj b/samples/csharp/maui/speech-to-text/speech-to-text/speech-to-text.csproj
index 848b1246a..afe1f8e6a 100644
--- a/samples/csharp/maui/speech-to-text/speech-to-text/speech-to-text.csproj
+++ b/samples/csharp/maui/speech-to-text/speech-to-text/speech-to-text.csproj
@@ -4,7 +4,7 @@
net8.0-android;net8.0-ios;net8.0-maccatalyst
$(TargetFrameworks);net8.0-windows10.0.19041.0
-
+
None
Exe
speech_to_text
@@ -31,12 +31,12 @@
6.5
-
+
manual
-gcc_flags="-framework AudioToolbox"
-
+
manual
-gcc_flags="-framework AudioToolbox"
@@ -60,7 +60,8 @@
-
+
+
diff --git a/samples/csharp/sharedcontent/console/speech_diagnostics_logging_samples.cs b/samples/csharp/sharedcontent/console/speech_diagnostics_logging_samples.cs
index 615ded76e..5128cba2d 100644
--- a/samples/csharp/sharedcontent/console/speech_diagnostics_logging_samples.cs
+++ b/samples/csharp/sharedcontent/console/speech_diagnostics_logging_samples.cs
@@ -59,7 +59,7 @@ public static void FileLoggerWithFilter()
FileLogger.Stop();
// Clear filters
- EventLogger.SetFilters();
+ FileLogger.SetFilters();
// Now look at the log file that was created..
}
diff --git a/samples/csharp/tts-text-stream/console/TtsTextStreamSample.csproj b/samples/csharp/tts-text-stream/console/TtsTextStreamSample.csproj
index 75eaf9109..1100bb518 100644
--- a/samples/csharp/tts-text-stream/console/TtsTextStreamSample.csproj
+++ b/samples/csharp/tts-text-stream/console/TtsTextStreamSample.csproj
@@ -2,14 +2,14 @@
Exe
- net6.0
+ net8.0
enable
enable
-
+
diff --git a/samples/csharp/uwp/speechtotext-uwp/README.md b/samples/csharp/uwp/speechtotext-uwp/README.md
index cf59cab00..fd8eec28b 100644
--- a/samples/csharp/uwp/speechtotext-uwp/README.md
+++ b/samples/csharp/uwp/speechtotext-uwp/README.md
@@ -5,19 +5,13 @@ This sample demonstrates various forms of recognizing speech with C# under the U
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC with Windows 10 Fall Creators Update (10.0; Build 16299) or later.
- Some sample scenarios require a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **Universal Windows Platform development** workload in Visual Studio. You can enable it in **Tools** \> **Get Tools and Features**.
-* Note: processor target ARM is not yet supported.
-
-
+* A Windows PC with [Microsoft Visual Studio](https://www.visualstudio.com/) installed; some sample scenarios require a working microphone. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Set the active solution configuration and platform to the desired values under **Build** \> **Configuration Manager**:
* On a 64-bit Windows installation, choose `x64` as active solution platform.
diff --git a/samples/csharp/uwp/speechtotext-uwp/speechtotext-uwp/speechtotext-uwp.csproj b/samples/csharp/uwp/speechtotext-uwp/speechtotext-uwp/speechtotext-uwp.csproj
index 56105f48c..e8449c748 100644
--- a/samples/csharp/uwp/speechtotext-uwp/speechtotext-uwp/speechtotext-uwp.csproj
+++ b/samples/csharp/uwp/speechtotext-uwp/speechtotext-uwp/speechtotext-uwp.csproj
@@ -108,7 +108,7 @@
- 1.41.1
+ 1.42.0
6.2.8
diff --git a/samples/csharp/uwp/texttospeech-uwp/README.md b/samples/csharp/uwp/texttospeech-uwp/README.md
index 54b4150f6..9224b86a6 100644
--- a/samples/csharp/uwp/texttospeech-uwp/README.md
+++ b/samples/csharp/uwp/texttospeech-uwp/README.md
@@ -5,18 +5,13 @@ This sample demonstrates various forms of synthesizing speech with C# under the
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Windows PC with Windows 10 Fall Creators Update (10.0; Build 16299) or later.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **Universal Windows Platform development** workload in Visual Studio.
-* Note: processor target ARM is not yet supported.
-
- You can enable it in **Tools** \> **Get Tools and Features**.
+* A Windows PC with a working speaker/headset and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Set the active solution configuration and platform to the desired values under **Build** \> **Configuration Manager**:
* On a 64-bit Windows installation, choose `x64` as active solution platform.
diff --git a/samples/csharp/uwp/texttospeech-uwp/texttospeech-uwp/texttospeech-uwp.csproj b/samples/csharp/uwp/texttospeech-uwp/texttospeech-uwp/texttospeech-uwp.csproj
index 2aff6b975..c499bc9df 100644
--- a/samples/csharp/uwp/texttospeech-uwp/texttospeech-uwp/texttospeech-uwp.csproj
+++ b/samples/csharp/uwp/texttospeech-uwp/texttospeech-uwp/texttospeech-uwp.csproj
@@ -107,7 +107,7 @@
- 1.41.1
+ 1.42.0
6.2.8
diff --git a/samples/csharp/uwp/virtualassistant-uwp/README.md b/samples/csharp/uwp/virtualassistant-uwp/README.md
index c90c94187..793442e38 100644
--- a/samples/csharp/uwp/virtualassistant-uwp/README.md
+++ b/samples/csharp/uwp/virtualassistant-uwp/README.md
@@ -6,15 +6,13 @@ This sample demonstrates how to use Voice Assistants to recognize speech and rec
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
* A pre-configured bot created using Bot Framework version 4.2 or above. See [here for steps on how to create a bot](https://blog.botframework.com/2018/05/07/build-a-microsoft-bot-framework-bot-with-the-bot-builder-sdk-v4/). The bot would need to subscribe to the new [Direct Line Speech channel](https://docs.microsoft.com/azure/bot-service/bot-service-channel-connect-directlinespeech) to receive voice inputs.
-* A Windows PC with Windows 10 Fall Creators Update (10.0; Build 16299) or later, with a working microphone.
-* [Microsoft Visual Studio 2017](https://www.visualstudio.com/), Community Edition or higher.
-* The **Universal Windows Platform development** workload in Visual Studio.
+* A Windows PC with a working microphone and [Microsoft Visual Studio](https://www.visualstudio.com/) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-csharp) for details on system requirements and setup.
## Build the sample
* **By building this sample you will download the Microsoft Cognitive Services Speech SDK. By downloading you acknowledge its license, see [Speech SDK license agreement](https://aka.ms/csspeech/license).**
* [Download the sample code to your development PC.](/README.md#get-the-samples)
-* Start Microsoft Visual Studio 2017 and select **File** \> **Open** \> **Project/Solution**.
+* Start Microsoft Visual Studio and select **File** \> **Open** \> **Project/Solution**.
* Navigate to the folder containing this sample, and select the solution file contained within it.
* Set the active solution configuration and platform to the desired values under **Build** \> **Configuration Manager**:
* On a 64-bit Windows installation, choose `x64` as active solution platform.
diff --git a/samples/csharp/uwp/virtualassistant-uwp/VirtualAssistantPreview.csproj b/samples/csharp/uwp/virtualassistant-uwp/VirtualAssistantPreview.csproj
index 1751f828e..e1d72bc24 100644
--- a/samples/csharp/uwp/virtualassistant-uwp/VirtualAssistantPreview.csproj
+++ b/samples/csharp/uwp/virtualassistant-uwp/VirtualAssistantPreview.csproj
@@ -165,7 +165,7 @@
4.3.2
- 1.41.1
+ 1.42.0
6.2.8
diff --git a/samples/csharp/web/avatar/Avatar.csproj b/samples/csharp/web/avatar/Avatar.csproj
index 32468e216..70b28df70 100644
--- a/samples/csharp/web/avatar/Avatar.csproj
+++ b/samples/csharp/web/avatar/Avatar.csproj
@@ -9,7 +9,7 @@
-
+
diff --git a/samples/custom-voice/README.md b/samples/custom-voice/README.md
index 42d80b836..3274e9620 100644
--- a/samples/custom-voice/README.md
+++ b/samples/custom-voice/README.md
@@ -1,8 +1,9 @@
+
# Examples to use Custom Voice
The Custom Voice API (Preview) is designed to create professional voice and personal voice. The functionality is exposed through a REST API and is easy to access from many programming languages.
-For a detailed explanation see the [custom neural voice documentation](https://learn.microsoft.com/en-us/azure/ai-services/speech-service/custom-neural-voice) and the `README.md` in the language specific subdirectories.
+For a detailed explanation see the [custom neural voice documentation](https://learn.microsoft.com/azure/ai-services/speech-service/custom-neural-voice) and the `README.md` in the language specific subdirectories.
REST API doc: [custom voice REST API](https://learn.microsoft.com/rest/api/aiservices/speechapi/operation-groups?view=rest-aiservices-speechapi-2024-02-01-preview).
@@ -18,5 +19,5 @@ Available samples:
1. You need a Cognitive Services subscription key to run sample code here.
- You can get the subscription key from the "Keys and Endpoint" tab on your Cognitive Services or Speech resource in the Azure Portal.
- Custom Voice is only available for paid subscriptions, free subscriptions are not supported.
-2. Both professional voice and personal voice access are [limited](https://learn.microsoft.com/en-us/legal/cognitive-services/speech-service/custom-neural-voice/limited-access-custom-neural-voice?context=%2fazure%2fcognitive-services%2fspeech-service%2fcontext%2fcontext) based on eligibility and usage criteria. Please [request access](https://aka.ms/customneural) before using sample code here.
+2. Both professional voice and personal voice access are [limited](https://learn.microsoft.com/legal/cognitive-services/speech-service/custom-neural-voice/limited-access-custom-neural-voice?context=%2fazure%2fcognitive-services%2fspeech-service%2fcontext%2fcontext) based on eligibility and usage criteria. Please [request access](https://aka.ms/customneural) before using sample code here.
3. Personal voice is available in these regions: West Europe, East US, and South East Asia.
diff --git a/samples/custom-voice/csharp/CustomVoiceSample/CustomVoiceSample.csproj b/samples/custom-voice/csharp/CustomVoiceSample/CustomVoiceSample.csproj
index 87642918e..585ff57d9 100644
--- a/samples/custom-voice/csharp/CustomVoiceSample/CustomVoiceSample.csproj
+++ b/samples/custom-voice/csharp/CustomVoiceSample/CustomVoiceSample.csproj
@@ -1,13 +1,13 @@
Exe
- net6.0
+ net8.0
enable
-
+
diff --git a/samples/ingestion/ingestion-client/versions.nugets.props b/samples/ingestion/ingestion-client/versions.nugets.props
index c6d025fe1..a73352526 100644
--- a/samples/ingestion/ingestion-client/versions.nugets.props
+++ b/samples/ingestion/ingestion-client/versions.nugets.props
@@ -2,7 +2,7 @@
[1.1.0-beta.2]
[5.3.0]
- [12.20.0]
+ [12.23.0]
[3.4.3]
[3.4.3]
@@ -16,9 +16,9 @@
[1.2.0]
[1.17.4]
- [8.0.6]
- [8.0.6]
- [8.0.6]
+ [8.0.11]
+ [8.0.11]
+ [8.0.11]
diff --git a/samples/java/android/SpeechSynthesis/README.md b/samples/java/android/SpeechSynthesis/README.md
index 68edf1c4c..0e882a744 100644
--- a/samples/java/android/SpeechSynthesis/README.md
+++ b/samples/java/android/SpeechSynthesis/README.md
@@ -9,9 +9,9 @@ This sample demonstrates how to synthesize speech with Java using the Speech SDK
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A PC (Windows, Linux, Mac) capable to run Android Studio.
-* Version 4.2 or higher of [Android Studio](https://developer.android.com/studio/).
-* An ARM-based Android device (API 23: Android 6.0 Marshmallow or higher) [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working speaker. Make sure that "USB debugging" is enabled on your device.
+* A PC (Windows, Linux, Mac) capable to run [Android Studio](https://developer.android.com/studio/).
+* An ARM-based Android device [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working speaker. Make sure that "USB debugging" is enabled on your device.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-java) for details on system requirements and setup.
## Build the sample
diff --git a/samples/java/android/SpeechSynthesis/app/build.gradle b/samples/java/android/SpeechSynthesis/app/build.gradle
index 56463b3ea..2c44662f2 100644
--- a/samples/java/android/SpeechSynthesis/app/build.gradle
+++ b/samples/java/android/SpeechSynthesis/app/build.gradle
@@ -28,7 +28,7 @@ android {
dependencies {
// Speech SDK
- implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.41.1'
+ implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.42.0'
implementation 'androidx.appcompat:appcompat:1.6.1'
implementation 'com.google.android.material:material:1.8.0'
diff --git a/samples/java/android/avatar/README.md b/samples/java/android/avatar/README.md
index eccbf1dcf..ccd30e4a1 100644
--- a/samples/java/android/avatar/README.md
+++ b/samples/java/android/avatar/README.md
@@ -5,9 +5,9 @@ This sample demonstrates usage of Azure text-to-speech avatar real-time API, wit
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A PC (Windows, Linux, Mac) capable to run Android Studio.
-* Version 4.2 or higher of [Android Studio](https://developer.android.com/studio/).
-* An ARM-based Android device (API 23: Android 6.0 Marshmallow or higher) [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working speaker. Make sure that "USB debugging" is enabled on your device.
+* A PC (Windows, Linux, Mac) capable to run [Android Studio](https://developer.android.com/studio/).
+* An ARM-based Android device [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working speaker. Make sure that "USB debugging" is enabled on your device.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-java) for details on system requirements and setup.
## Build the sample
diff --git a/samples/java/android/avatar/app/build.gradle b/samples/java/android/avatar/app/build.gradle
index 71579e3c8..136f54361 100644
--- a/samples/java/android/avatar/app/build.gradle
+++ b/samples/java/android/avatar/app/build.gradle
@@ -26,7 +26,7 @@ android {
dependencies {
// Speech SDK
- implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.41.1'
+ implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.42.0'
implementation 'androidx.appcompat:appcompat:1.7.0'
implementation 'com.google.android.material:material:1.12.0'
diff --git a/samples/java/android/compressed-input/README.md b/samples/java/android/compressed-input/README.md
index 7fba0d789..ca25812f0 100644
--- a/samples/java/android/compressed-input/README.md
+++ b/samples/java/android/compressed-input/README.md
@@ -5,9 +5,9 @@ This sample demonstrates how to recognize speech from compressed audio input wit
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A PC (Windows, Linux, Mac) capable to run Android Studio.
-* Version 4.2 or higher of [Android Studio](https://developer.android.com/studio/).
-* An ARM-based Android device (API 23: Android 6.0 Marshmallow or higher) [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working microphone. Make sure that "USB debugging" is enabled on your device.
+* A PC (Windows, Linux, Mac) capable to run [Android Studio](https://developer.android.com/studio/).
+* An ARM-based Android device [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working microphone. Make sure that "USB debugging" is enabled on your device.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-java) for details on system requirements and setup.
## Build the sample
diff --git a/samples/java/android/compressed-input/app/build.gradle b/samples/java/android/compressed-input/app/build.gradle
index ab1efa874..a06398333 100644
--- a/samples/java/android/compressed-input/app/build.gradle
+++ b/samples/java/android/compressed-input/app/build.gradle
@@ -25,7 +25,7 @@ android {
dependencies {
// Speech SDK
- implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.41.1'
+ implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.42.0'
implementation 'androidx.appcompat:appcompat:1.3.1'
implementation 'com.google.android.material:material:1.4.0'
diff --git a/samples/java/android/embedded-speech/README.md b/samples/java/android/embedded-speech/README.md
index 27257ae88..b110e07a7 100644
--- a/samples/java/android/embedded-speech/README.md
+++ b/samples/java/android/embedded-speech/README.md
@@ -77,7 +77,7 @@ Do **not** add [client-sdk](https://mvnrepository.com/artifact/com.microsoft.cog
**Note:** Make sure that `@aar` suffix is used when the dependency is specified in `build.gradle`. For example,
```
dependencies {
- implementation 'com.microsoft.cognitiveservices.speech:client-sdk-embedded:1.41.1@aar'
+ implementation 'com.microsoft.cognitiveservices.speech:client-sdk-embedded:1.42.0@aar'
...
```
diff --git a/samples/java/android/embedded-speech/app/build.gradle b/samples/java/android/embedded-speech/app/build.gradle
index 3d3a65f32..10ee17e09 100644
--- a/samples/java/android/embedded-speech/app/build.gradle
+++ b/samples/java/android/embedded-speech/app/build.gradle
@@ -25,7 +25,7 @@ dependencies {
implementation fileTree(include: ['*.jar'], dir: 'libs')
// Speech SDK
- implementation 'com.microsoft.cognitiveservices.speech:client-sdk-embedded:1.41.1@aar'
+ implementation 'com.microsoft.cognitiveservices.speech:client-sdk-embedded:1.42.0@aar'
implementation 'androidx.appcompat:appcompat:1.3.1'
implementation 'androidx.constraintlayout:constraintlayout:2.1.0'
diff --git a/samples/java/android/sdkdemo/README.md b/samples/java/android/sdkdemo/README.md
index 6011094d1..d61fd9b34 100644
--- a/samples/java/android/sdkdemo/README.md
+++ b/samples/java/android/sdkdemo/README.md
@@ -9,9 +9,9 @@ This sample demonstrates how to recognize speech and intents with Java using the
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A PC (Windows, Linux, Mac) capable to run Android Studio.
-* Version 4.2 or higher of [Android Studio](https://developer.android.com/studio/).
-* An ARM-based Android device (API 23: Android 6.0 Marshmallow or higher; API 26 or higher for Pronunciation Assessment samples) [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working microphone. Make sure that "USB debugging" is enabled on your device.
+* A PC (Windows, Linux, Mac) capable of running [Android Studio](https://developer.android.com/studio/).
+* An ARM-based Android device (API 26 or higher for Pronunciation Assessment samples) [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working microphone. Make sure that "USB debugging" is enabled on your device.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-java) for details on system requirements and setup.
## Build the sample
diff --git a/samples/java/android/sdkdemo/app/build.gradle b/samples/java/android/sdkdemo/app/build.gradle
index 4054baaba..277a2b2fe 100644
--- a/samples/java/android/sdkdemo/app/build.gradle
+++ b/samples/java/android/sdkdemo/app/build.gradle
@@ -25,7 +25,7 @@ android {
dependencies {
// Speech SDK
- implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.41.1'
+ implementation 'com.microsoft.cognitiveservices.speech:client-sdk:1.42.0'
// Diff lib for pronunciation assessment
implementation "io.github.java-diff-utils:java-diff-utils:4.11"
diff --git a/samples/java/jre/console/README.md b/samples/java/jre/console/README.md
index f43650aea..25454ac8e 100644
--- a/samples/java/jre/console/README.md
+++ b/samples/java/jre/console/README.md
@@ -2,16 +2,11 @@
This sample demonstrates various forms of speech recognition, intent recognition, speech synthesis, and translation using the Speech SDK for Java on Windows or Linux.
-> **Note:**
-> The Speech SDK for the JRE currently supports only the Windows x64 platform, and [specific Linux distributions and target architectures](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-sdk?tabs=linux).
-
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A PC (Windows x64 or a supported Linux distribution) capable to run Eclipse,[[1]](#footnote1) some sample scenarios require a working microphone.
-* Java 8 or 11 JRE/JDK.
-* Version 4.8 of [Eclipse](https://www.eclipse.org), 64-bit.[[1]](#footnote1)
-* On Linux, see the [Linux platform requirements](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?tabs=linux&pivots=programming-language-java#platform-requirements) for installing the required dependencies.
+* A PC (Windows, Linux, Mac) capable of running [Eclipse](https://www.eclipse.org);[[1]](#footnote1) some sample scenarios require a working microphone.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-java) for details on system requirements and setup.
1. This sample has not been verified with Eclipse on ARM platforms.
diff --git a/samples/java/jre/console/pom.xml b/samples/java/jre/console/pom.xml
index c5904afdd..e38b9bc42 100644
--- a/samples/java/jre/console/pom.xml
+++ b/samples/java/jre/console/pom.xml
@@ -56,7 +56,7 @@
com.microsoft.cognitiveservices.speech
client-sdk
- 1.41.1
+ 1.42.0
jakarta.json
diff --git a/samples/java/jre/console/src/com/microsoft/cognitiveservices/speech/samples/console/Main.java b/samples/java/jre/console/src/com/microsoft/cognitiveservices/speech/samples/console/Main.java
index 8ed63d8cd..0c4ff4374 100644
--- a/samples/java/jre/console/src/com/microsoft/cognitiveservices/speech/samples/console/Main.java
+++ b/samples/java/jre/console/src/com/microsoft/cognitiveservices/speech/samples/console/Main.java
@@ -64,6 +64,12 @@ public static void main(String[] args) {
System.out.println("47. Continuous speech recognition from file, with at-start language detection.");
System.out.println("48. Continuous speech recognition from file, with at-start language detection with custom model.");
System.out.println("49. Continuous speech recognition from file, with continuous language detection with custom models.");
+ System.out.println("50. Diagnostics logging to file (without a filter).");
+ System.out.println("51. Diagnostics logging to file (with a filter).");
+ System.out.println("52. Diagnostics Subscribing to logging event (without a filter).");
+ System.out.println("53. Diagnostics Subscribing to logging event (with a filter).");
+ System.out.println("54. Diagnostics logging to memory buffer with logging level (with or without filter).");
+ System.out.println("55. Diagnostics logging to memory buffer with ingested self-defined SPX trace mark.");
System.out.print(prompt);
@@ -220,6 +226,24 @@ public static void main(String[] args) {
case "49":
SpeechRecognitionSamples.continuousRecognitionFromFileWithContinuousLanguageDetectionWithCustomModels();
break;
+ case "50":
+ SpeechDiagnosticsLoggingSamples.fileLoggerWithoutFilter();
+ break;
+ case "51":
+ SpeechDiagnosticsLoggingSamples.fileLoggerWithFilter();
+ break;
+ case "52":
+ SpeechDiagnosticsLoggingSamples.eventLoggerWithoutFilter();
+ break;
+ case "53":
+ SpeechDiagnosticsLoggingSamples.eventLoggerWithFilter();
+ break;
+ case "54":
+ SpeechDiagnosticsLoggingSamples.memoryLoggerWithOrWithoutFilter();
+ break;
+ case "55":
+ SpeechDiagnosticsLoggingSamples.selfDefinedSpxTraceLogging();
+ break;
case "0":
System.out.println("Exiting...");
break;
diff --git a/samples/java/jre/console/src/com/microsoft/cognitiveservices/speech/samples/console/SpeechDiagnosticsLoggingSamples.java b/samples/java/jre/console/src/com/microsoft/cognitiveservices/speech/samples/console/SpeechDiagnosticsLoggingSamples.java
new file mode 100644
index 000000000..5b43e49d7
--- /dev/null
+++ b/samples/java/jre/console/src/com/microsoft/cognitiveservices/speech/samples/console/SpeechDiagnosticsLoggingSamples.java
@@ -0,0 +1,226 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
+//
+
+package com.microsoft.cognitiveservices.speech.samples.console;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import com.microsoft.cognitiveservices.speech.*;
+import com.microsoft.cognitiveservices.speech.diagnostics.logging.EventLogger;
+import com.microsoft.cognitiveservices.speech.diagnostics.logging.FileLogger;
+import com.microsoft.cognitiveservices.speech.diagnostics.logging.MemoryLogger;
+import com.microsoft.cognitiveservices.speech.diagnostics.logging.SpxTrace;
+import com.microsoft.cognitiveservices.speech.diagnostics.logging.Level;
+
+// Shows how to enable Speech SDK trace logging. Microsoft may ask you to collect logs in order
+// to investigate an issue you reported
+@SuppressWarnings("resource") // scanner
+public class SpeechDiagnosticsLoggingSamples {
+
+ // Enable Speech SDK trace logging to a file
+ public static void fileLoggerWithoutFilter() throws InterruptedException, ExecutionException {
+ // Define the full path and name of the log file on your local disk
+ String logFile = "speech-sdk-log.txt";
+
+ // Start logging to the given file. By default it will create
+ // a new file, but you have the option of appending to an existing one
+ FileLogger.start(logFile);
+
+ // Do your Speech SDK calls here... for example:
+ try (SpeechConfig config = SpeechConfig.fromSubscription("YourSubscriptionKey", "YourServiceRegion");
+ SpeechRecognizer recognizer = new SpeechRecognizer(config)) {
+
+ // ...
+
+ // Stop logging to the file
+ FileLogger.stop();
+ }
+
+ // Now look at the log file that was created..
+ }
+
+ // Enable Speech SDK trace logging to a file with a filter
+ public static void fileLoggerWithFilter() throws InterruptedException, ExecutionException {
+ // Define the full path and name of the log file
+ String logFile = "speech-sdk-log.txt";
+
+ // Start logging to the given file, but Log only traces
+ // that contain either one of the filter strings provided.
+ // By default it will create a new file, but you have the option of
+ // appending to an existing one.
+ String[] filters = { "YourFirstString", "YourSecondString" };
+
+ FileLogger.setFilters(filters);
+ FileLogger.start(logFile);
+
+ // Do your Speech SDK calls here... for example:
+ try (SpeechConfig config = SpeechConfig.fromSubscription("YourSubscriptionKey", "YourServiceRegion");
+ SpeechRecognizer recognizer = new SpeechRecognizer(config)) {
+
+ // ...
+
+ // Stop logging to the file
+ FileLogger.stop();
+ FileLogger.setFilters();
+ }
+
+ // Now look at the log file that was created..
+ }
+
+ // Enable Speech SDK trace logging to a subscribed event handler
+ public static void eventLoggerWithoutFilter() throws InterruptedException, ExecutionException {
+ final Object lockObject = new Object();
+ List<String> messages = new ArrayList<>();
+
+ // Register a callback that will get invoked by Speech SDK on every new log message
+ EventLogger.setCallback((message) -> {
+ // Copy the string and store it for further processing. Do not do any processing in the event thread!
+ synchronized (lockObject) {
+ messages.add(message);
+ }
+ });
+
+ // Do your Speech SDK calls... for example:
+ try (SpeechConfig config = SpeechConfig.fromSubscription("YourSubscriptionKey", "YourServiceRegion");
+ SpeechRecognizer recognizer = new SpeechRecognizer(config)) {
+
+ // ...
+
+ // Stop logging by setting an empty callback
+ EventLogger.setCallback();
+ }
+
+ // See resulting logs on the console
+ for (String message : messages) {
+ System.out.print(message);
+ }
+ }
+
+ // Enable Speech SDK trace logging to a subscribed event handler with a filter
+ public static void eventLoggerWithFilter() throws InterruptedException, ExecutionException {
+ final Object lockObject = new Object();
+ List<String> messages = new ArrayList<>();
+
+ // Register a callback that will get invoked by Speech SDK on every new log message
+ EventLogger.setCallback((message) -> {
+ // Copy the string and store it for further processing. Do not do any processing in the event thread!
+ synchronized (lockObject) {
+ messages.add(message);
+ }
+ });
+
+ // Set an event filter, such that the callback will be invoked only for traces
+ // that contain either one of the filter strings provided.
+ String[] filters = { "YourFirstString", "YourSecondString" };
+ EventLogger.setFilters(filters);
+
+ // Do your Speech SDK calls... for example:
+ try (SpeechConfig config = SpeechConfig.fromSubscription("YourSubscriptionKey", "YourServiceRegion");
+ SpeechRecognizer recognizer = new SpeechRecognizer(config)) {
+
+ // ...
+
+ // Stop logging by setting an empty callback
+ EventLogger.setCallback();
+ EventLogger.setFilters();
+ }
+
+ // See resulting logs on the console
+ for (String message : messages) {
+ System.out.print(message);
+ }
+ }
+
+ // Enable Speech SDK trace logging to memory buffer with or without a filter
+ public static void memoryLoggerWithOrWithoutFilter() throws InterruptedException, ExecutionException {
+ // Optional - Apply a filter, such that only traces that contain either one of the
+ // filter strings will be logged. Microsoft will provide the filter when relevant.
+
+ // String[] filters = { "YourFirstString", "YourSecondString" };
+ // MemoryLogger.setFilters(filters);
+
+ // Set the level of logging to be captured in memory and start logging
+ MemoryLogger.setLevel(Level.Info);
+ MemoryLogger.start();
+
+ // Do your Speech SDK calls here... for example:
+ try (SpeechConfig config = SpeechConfig.fromSubscription("YourSubscriptionKey", "YourServiceRegion");
+ SpeechRecognizer recognizer = new SpeechRecognizer(config)) {
+
+ // Define the full path and name of a log file on your local disk
+ String logFile = "speech-sdk-log.txt";
+
+ // At any time (while still logging or after logging is stopped) you can dump the
+ // recent traces from memory to a file:
+ MemoryLogger.dump(logFile);
+
+ // Or dump to any object that is derived from java.io.Writer. For example, System.out,
+ // which will enable logging to your console windows:
+ try {
+ MemoryLogger.dump(System.out);
+ } catch (IOException ex) {
+ ex.printStackTrace();
+ }
+
+ // Or dump to a list of strings, and see the results on the console:
+ List<String> messages = MemoryLogger.dump();
+
+ for (String message : messages) {
+ System.out.print(message);
+ }
+
+ // Stop logging to memory
+ MemoryLogger.stop();
+
+ // Optional - Reset the filters
+ // MemoryLogger.setFilters();
+ }
+ }
+
+ // Ingest self-defined trace into Speech SDK trace and log the mixed trace to memory buffer such that
+ // the self-defined trace can be used as markers to facilitate the investigation of the issue
+ public static void selfDefinedSpxTraceLogging() throws InterruptedException, ExecutionException {
+ // Set the level of logging to be captured in memory and start logging
+ MemoryLogger.setLevel(Level.Info);
+ MemoryLogger.start();
+
+ SpxTrace.SPX_TRACE_INFO("### This is my trace info -- START ###");
+
+ // Do your Speech SDK calls here... for example:
+ try (SpeechConfig config = SpeechConfig.fromSubscription("YourSubscriptionKey", "YourServiceRegion");
+ SpeechRecognizer recognizer = new SpeechRecognizer(config)) {
+
+ // Define the full path and name of a log file on your local disk
+ String logFile = "speech-sdk-log.txt";
+
+ SpxTrace.SPX_TRACE_INFO("### This is my trace info -- STOP ###");
+
+ // At any time (while still logging or after logging is stopped) you can dump the
+ // recent traces from memory to a file:
+ MemoryLogger.dump(logFile);
+
+ // Or dump to any object that is derived from java.io.Writer. For example, System.out,
+ // which will enable logging to your console windows:
+ try {
+ MemoryLogger.dump(System.out);
+ } catch (IOException ex) {
+ ex.printStackTrace();
+ }
+
+ // Or dump to a list of strings, and see the results on the console:
+ List<String> messages = MemoryLogger.dump();
+
+ for (String message : messages) {
+ System.out.print(message);
+ }
+
+ // Stop logging to memory
+ MemoryLogger.stop();
+ }
+ }
+}
diff --git a/samples/java/jre/embedded-speech/pom.xml b/samples/java/jre/embedded-speech/pom.xml
index b7649efbe..4681444c0 100644
--- a/samples/java/jre/embedded-speech/pom.xml
+++ b/samples/java/jre/embedded-speech/pom.xml
@@ -56,7 +56,7 @@
com.microsoft.cognitiveservices.speech
client-sdk-embedded
- 1.41.1
+ 1.42.0
org.json
diff --git a/samples/js/node/package.json b/samples/js/node/package.json
index 9965c5f0f..8dfeef5d2 100644
--- a/samples/js/node/package.json
+++ b/samples/js/node/package.json
@@ -17,7 +17,7 @@
"lodash.foreach": "^4.5.0",
"lodash.sum": "^4.0.2",
"mic-to-speech": "^1.0.1",
- "microsoft-cognitiveservices-speech-sdk": "^1.41.0",
+ "microsoft-cognitiveservices-speech-sdk": "^1.42.0",
"readline": "^1.3.0",
"segment": "^0.1.3",
"wav": "^1.0.2"
diff --git a/samples/kotlin/android/continuous-reco/README.md b/samples/kotlin/android/continuous-reco/README.md
index 3ce3336c7..f1ca91c0f 100644
--- a/samples/kotlin/android/continuous-reco/README.md
+++ b/samples/kotlin/android/continuous-reco/README.md
@@ -9,9 +9,9 @@ This sample demonstrates how to use continuous recognition with Kotlin using the
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A PC (Windows, Linux, Mac) capable to run Android Studio.
-* Version 4.2 or higher of [Android Studio](https://developer.android.com/studio/).
-* An ARM-based Android device (API 23: Android 6.0 Marshmallow or higher) [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working speaker. Make sure that "USB debugging" is enabled on your device.
+* A PC (Windows, Linux, Mac) capable of running [Android Studio](https://developer.android.com/studio/).
+* An ARM-based Android device [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working microphone. Make sure that "USB debugging" is enabled on your device.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-java) for details on system requirements and setup.
## Build the sample
diff --git a/samples/kotlin/android/continuous-reco/app/build.gradle b/samples/kotlin/android/continuous-reco/app/build.gradle
index 3a573ac10..f2d7c06ad 100644
--- a/samples/kotlin/android/continuous-reco/app/build.gradle
+++ b/samples/kotlin/android/continuous-reco/app/build.gradle
@@ -36,5 +36,5 @@ dependencies {
implementation 'androidx.appcompat:appcompat:1.4.2'
implementation 'com.google.android.material:material:1.6.1'
implementation 'androidx.constraintlayout:constraintlayout:2.1.4'
- implementation "com.microsoft.cognitiveservices.speech:client-sdk:1.41.1"
+ implementation "com.microsoft.cognitiveservices.speech:client-sdk:1.42.0"
}
\ No newline at end of file
diff --git a/samples/kotlin/android/tts-pause-example/README.md b/samples/kotlin/android/tts-pause-example/README.md
index 609f5567e..fda6e16b1 100644
--- a/samples/kotlin/android/tts-pause-example/README.md
+++ b/samples/kotlin/android/tts-pause-example/README.md
@@ -9,9 +9,9 @@ This sample demonstrates how to synthesize speech with Kotlin using the Speech S
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A PC (Windows, Linux, Mac) capable to run Android Studio.
-* Version 4.2 or higher of [Android Studio](https://developer.android.com/studio/).
-* An ARM-based Android device (API 23: Android 6.0 Marshmallow or higher) [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working speaker. Make sure that "USB debugging" is enabled on your device.
+* A PC (Windows, Linux, Mac) capable of running [Android Studio](https://developer.android.com/studio/).
+* An ARM-based Android device [enabled for development](https://developer.android.com/studio/debug/dev-options) with a working speaker. Make sure that "USB debugging" is enabled on your device.
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-java) for details on system requirements and setup.
## Build the sample
diff --git a/samples/kotlin/android/tts-pause-example/app/build.gradle b/samples/kotlin/android/tts-pause-example/app/build.gradle
index bf2fcf5b5..2d9897f21 100644
--- a/samples/kotlin/android/tts-pause-example/app/build.gradle
+++ b/samples/kotlin/android/tts-pause-example/app/build.gradle
@@ -36,6 +36,6 @@ dependencies {
implementation 'com.google.android.material:material:1.6.1'
implementation 'androidx.constraintlayout:constraintlayout:2.1.4'
- implementation "com.microsoft.cognitiveservices.speech:client-sdk:1.41.1"
+ implementation "com.microsoft.cognitiveservices.speech:client-sdk:1.42.0"
}
\ No newline at end of file
diff --git a/samples/objective-c/ios/README.md b/samples/objective-c/ios/README.md
index bea0db078..7c1c4ff5f 100644
--- a/samples/objective-c/ios/README.md
+++ b/samples/objective-c/ios/README.md
@@ -25,7 +25,7 @@ The [synthesis sample app](./synthesis-samples) shows various other techniques t
## Prerequisites
- A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-- A Mac with Xcode installed as iOS development environment. These samples target iOS versions 11.0 or later.
+- A Mac with Xcode installed as iOS development environment. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-objectivec) for details on system requirements and setup.
## Get the code for the samples
diff --git a/samples/objective-c/ios/compressed-streams/README.md b/samples/objective-c/ios/compressed-streams/README.md
index 4a7a52e9b..509c27683 100644
--- a/samples/objective-c/ios/compressed-streams/README.md
+++ b/samples/objective-c/ios/compressed-streams/README.md
@@ -7,7 +7,7 @@ The Speech SDK's **Compressed Audio Input Stream** API provides a way to stream
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A Mac with [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) version 9.4.1 or later installed as iOS development environment. This tutorial targets iOS versions 9.2 or later.
+* A Mac with [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) installed as iOS development environment. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-objectivec) for details on system requirements and setup.
## Get the Code for the Sample App
diff --git a/samples/objective-c/ios/speech-samples/speech-samples.xcodeproj/project.pbxproj b/samples/objective-c/ios/speech-samples/speech-samples.xcodeproj/project.pbxproj
index 2639a312d..359d64800 100644
--- a/samples/objective-c/ios/speech-samples/speech-samples.xcodeproj/project.pbxproj
+++ b/samples/objective-c/ios/speech-samples/speech-samples.xcodeproj/project.pbxproj
@@ -53,7 +53,7 @@
3C00A32B25F8727100512312 /* AudioRecorder.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = AudioRecorder.m; sourceTree = ""; };
3C1B8BC92679C50600706BB3 /* pronunciation-assessment.wav */ = {isa = PBXFileReference; lastKnownFileType = audio.wav; path = "pronunciation-assessment.wav"; sourceTree = ""; };
52CF43E62AEF743E00227EF3 /* pronunciation_assessment_fall.wav */ = {isa = PBXFileReference; lastKnownFileType = audio.wav; path = pronunciation_assessment_fall.wav; sourceTree = ""; };
- 52FC64F929CACB27000C8918 /* MicrosoftCognitiveServicesSpeech.xcframework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.xcframework; name = MicrosoftCognitiveServicesSpeech.xcframework; path = "../../../../../../../../MicrosoftCognitiveServicesSpeech-XCFramework-1.41.1/MicrosoftCognitiveServicesSpeech.xcframework"; sourceTree = ""; };
+ 52FC64F929CACB27000C8918 /* MicrosoftCognitiveServicesSpeech.xcframework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.xcframework; name = MicrosoftCognitiveServicesSpeech.xcframework; path = "../../../../../../../../MicrosoftCognitiveServicesSpeech-XCFramework-1.42.0/MicrosoftCognitiveServicesSpeech.xcframework"; sourceTree = ""; };
DC2CBA03227047EA007EB18A /* wreck-a-nice-beach.wav */ = {isa = PBXFileReference; lastKnownFileType = audio.wav; name = "wreck-a-nice-beach.wav"; path = "./wreck-a-nice-beach.wav"; sourceTree = ""; };
F3184E46214674D60096193E /* speech-samples.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = "speech-samples.app"; sourceTree = BUILT_PRODUCTS_DIR; };
F3184E49214674D60096193E /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; };
diff --git a/samples/objective-c/macos/speech-keyword-recognition/README.md b/samples/objective-c/macos/speech-keyword-recognition/README.md
index f5049bf7e..5295e2515 100644
--- a/samples/objective-c/macos/speech-keyword-recognition/README.md
+++ b/samples/objective-c/macos/speech-keyword-recognition/README.md
@@ -6,7 +6,7 @@ Note that a Speech SDK version 1.38.0 or later is required.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://learn.microsoft.com/azure/ai-services/speech-service/overview#get-started).
-* A macOS (10.14 or later) machine with a microphone and [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) version 9.4.1 or later.
+* A macOS machine with a microphone and [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-objectivec) for details on system requirements and setup.
## Get the code for the sample app
diff --git a/samples/objective-c/macos/speech-keyword-recognition/helloworld/Podfile b/samples/objective-c/macos/speech-keyword-recognition/helloworld/Podfile
index 6a8080e85..16d04b0a5 100644
--- a/samples/objective-c/macos/speech-keyword-recognition/helloworld/Podfile
+++ b/samples/objective-c/macos/speech-keyword-recognition/helloworld/Podfile
@@ -1,4 +1,4 @@
target 'helloworld' do
platform :osx, '10.13'
- pod 'MicrosoftCognitiveServicesSpeech-macOS', '~> 1.41.1'
+ pod 'MicrosoftCognitiveServicesSpeech-macOS', '~> 1.42.0'
end
diff --git a/samples/python/console/README.md b/samples/python/console/README.md
index b4a842fdd..496faad20 100644
--- a/samples/python/console/README.md
+++ b/samples/python/console/README.md
@@ -4,11 +4,7 @@ This sample demonstrates various forms of speech recognition, intent recognition
## Prerequisites
-* On Windows and Linux Python 3.7 or later needs to be installed. Downloads are available [here](https://www.python.org/downloads/).
-* The Python Speech SDK package is available for Windows (x64 and x86), Mac x64 (macOS X version 10.14 or later), Mac arm64 (macOS version 11.0 or later), and Linux (see the list of [supported Linux distributions and target architectures](https://docs.microsoft.com/azure/cognitive-services/speech-service/speech-sdk?tabs=linux)).
-* On Linux, see the [Linux platform requirements](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?tabs=linux&pivots=programming-language-java#platform-requirements) for installing the required dependencies.
-* On Windows you also need the [Microsoft Visual C++ Redistributable for Visual Studio 2017](https://support.microsoft.com/help/2977003/the-latest-supported-visual-c-downloads) for your platform.
-
+* See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-python) for details on system requirements and setup.
## Build the sample
diff --git a/samples/python/console/speech_sample.py b/samples/python/console/speech_sample.py
index 9b2329fef..99c3a21c2 100644
--- a/samples/python/console/speech_sample.py
+++ b/samples/python/console/speech_sample.py
@@ -13,6 +13,8 @@
import threading
import wave
import utils
+import sys
+import io
try:
import azure.cognitiveservices.speech as speechsdk
@@ -23,9 +25,9 @@
https://docs.microsoft.com/azure/cognitive-services/speech-service/quickstart-python for
installation instructions.
""")
- import sys
sys.exit(1)
+sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
# Set up the subscription info for the Speech Service:
# Replace with your own subscription key and service region (e.g., "westus").
@@ -899,9 +901,14 @@ def pronunciation_assessment_continuous_from_file():
     speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
     audio_config = speechsdk.audio.AudioConfig(filename=zhcnlongfilename)
-    with open(zhcnlongtxtfilename, "r", encoding="utf-8") as t:
-        reference_text = t.readline()
+    reference_text = (
+        "秋天总是那么富有诗意。树叶渐渐变红,街道旁的银杏树也开始落叶。人们穿上厚重的外套,享受着凉爽的秋风。"
+        "黄昏时分,夕阳洒在街道上,给忙碌的一天增添了一抹温暖。无论是散步还是小憩,这个季节总能带来宁静和满足。"
+        "清晨,薄雾笼罩大地,空气中弥漫着一丝清新的凉意。中午阳光明媚,照在身上暖洋洋的,仿佛是一场心灵的抚慰。"
+        "傍晚时分,天空被染成了金黄和橙红,街上的行人脚步也不由得慢了下来,享受这份静谧和美好。你最喜欢哪个季节?"
+    )
+
     # Create pronunciation assessment config, set grading system, granularity and if enable miscue based on your requirement.
enable_miscue = True
enable_prosody_assessment = True
pronunciation_config = speechsdk.PronunciationAssessmentConfig(
@@ -923,8 +936,8 @@ def pronunciation_assessment_continuous_from_file():
prosody_scores = []
fluency_scores = []
durations = []
- startOffset = 0
- endOffset = 0
+ startOffset = None
+ endOffset = None
def stop_cb(evt: speechsdk.SessionEventArgs):
"""callback that signals to stop continuous recognition upon receiving an event `evt`"""
@@ -933,6 +946,7 @@ def stop_cb(evt: speechsdk.SessionEventArgs):
done = True
def recognized(evt: speechsdk.SpeechRecognitionEventArgs):
+ nonlocal startOffset, endOffset
print("pronunciation assessment for: {}".format(evt.result.text))
pronunciation_result = speechsdk.PronunciationAssessmentResult(evt.result)
print(" Accuracy score: {}, prosody score: {}, pronunciation score: {}, completeness score : {}, fluency score: {}".format(
@@ -947,8 +961,8 @@ def recognized(evt: speechsdk.SpeechRecognitionEventArgs):
         json_result = evt.result.properties.get(speechsdk.PropertyId.SpeechServiceResponse_JsonResult)
         jo = json.loads(json_result)
         nb = jo["NBest"][0]
-        durations.extend([int(w["Duration"]) + 100000 for w in nb["Words"]])
-        if startOffset == 0:
+        durations.extend([int(w["Duration"]) + 100000 for w in nb["Words"] if w["PronunciationAssessment"]["ErrorType"] == "None"])
+        if startOffset is None:
startOffset = nb["Words"][0]["Offset"]
endOffset = nb["Words"][-1]["Offset"] + nb["Words"][-1]["Duration"] + 100000
@@ -957,7 +976,7 @@ def recognized(evt: speechsdk.SpeechRecognitionEventArgs):
# (Optional) get the session ID
speech_recognizer.session_started.connect(lambda evt: print(f"SESSION ID: {evt.session_id}"))
speech_recognizer.session_stopped.connect(lambda evt: print('SESSION STOPPED {}'.format(evt)))
- speech_recognizer.canceled.connect(lambda evt: print('CANCELED {}'.format(evt)))
+ speech_recognizer.canceled.connect(lambda evt: print('CANCELED {}'.format(evt.cancellation_details)))
# Stop continuous recognition on either session stopped or canceled events
speech_recognizer.session_stopped.connect(stop_cb)
speech_recognizer.canceled.connect(stop_cb)
@@ -1018,8 +1037,8 @@ def recognized(evt: speechsdk.SpeechRecognitionEventArgs):
     prosody_score = sum(prosody_scores) / len(prosody_scores)
     # Re-calculate fluency score
     fluency_score = 0
-    if startOffset > 0:
-        fluency_score = durations_sum / (endOffset - startOffset) * 100
+    if startOffset is not None and endOffset is not None:
+        fluency_score = sum(durations) / (endOffset - startOffset) * 100
# Calculate whole completeness score
handled_final_words = [w.word for w in final_words if w.error_type != "Insertion"]
completeness_score = len([w for w in final_words if w.error_type == "None"]) / len(handled_final_words) * 100
diff --git a/samples/realtime-api-plus/poetry.lock b/samples/realtime-api-plus/poetry.lock
index 0c29f40a1..0242d185c 100644
--- a/samples/realtime-api-plus/poetry.lock
+++ b/samples/realtime-api-plus/poetry.lock
@@ -210,17 +210,17 @@ tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
[[package]]
name = "azure-cognitiveservices-speech"
-version = "1.41.1"
+version = "1.42.0"
description = "Microsoft Cognitive Services Speech SDK for Python"
optional = false
python-versions = ">=3.7"
files = [
- {file = "azure_cognitiveservices_speech-1.41.1-py3-none-macosx_10_14_x86_64.whl", hash = "sha256:ea9d466f236598e37ea3dad1db203a2901ef91b407e435b59d9b22669324074d"},
- {file = "azure_cognitiveservices_speech-1.41.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:0f52f7852965bb2f5cf9aed0d3c6ef58238867bb6f0287eba95e42e1a513dd74"},
- {file = "azure_cognitiveservices_speech-1.41.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:70030c6f1c875895eb985de3775f62349aa8687b6616afa9498466e281f178d3"},
- {file = "azure_cognitiveservices_speech-1.41.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:94ddda0deb3a9fee58a0a781b09ab8ab95401c5daf9bfc9f84ce8134d3a77055"},
- {file = "azure_cognitiveservices_speech-1.41.1-py3-none-win32.whl", hash = "sha256:039eec52c0a549a30658fa24a06d42afc6366c47b03b961c0b6f730fd421293e"},
- {file = "azure_cognitiveservices_speech-1.41.1-py3-none-win_amd64.whl", hash = "sha256:13679949f52f89c263e8b1c6a2d0f384d663917c58b150772cf42b710a01321c"},
+ {file = "azure_cognitiveservices_speech-1.42.0-py3-none-macosx_10_14_x86_64.whl", hash = "sha256:ea9d466f236598e37ea3dad1db203a2901ef91b407e435b59d9b22669324074d"},
+ {file = "azure_cognitiveservices_speech-1.42.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:0f52f7852965bb2f5cf9aed0d3c6ef58238867bb6f0287eba95e42e1a513dd74"},
+ {file = "azure_cognitiveservices_speech-1.42.0-py3-none-manylinux1_x86_64.whl", hash = "sha256:70030c6f1c875895eb985de3775f62349aa8687b6616afa9498466e281f178d3"},
+ {file = "azure_cognitiveservices_speech-1.42.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:94ddda0deb3a9fee58a0a781b09ab8ab95401c5daf9bfc9f84ce8134d3a77055"},
+ {file = "azure_cognitiveservices_speech-1.42.0-py3-none-win32.whl", hash = "sha256:039eec52c0a549a30658fa24a06d42afc6366c47b03b961c0b6f730fd421293e"},
+ {file = "azure_cognitiveservices_speech-1.42.0-py3-none-win_amd64.whl", hash = "sha256:13679949f52f89c263e8b1c6a2d0f384d663917c58b150772cf42b710a01321c"},
]
[[package]]
diff --git a/samples/realtime-api-plus/pyproject.toml b/samples/realtime-api-plus/pyproject.toml
index 9a4a92304..44e6ebe64 100644
--- a/samples/realtime-api-plus/pyproject.toml
+++ b/samples/realtime-api-plus/pyproject.toml
@@ -12,7 +12,7 @@ readme = "README.md"
[tool.poetry.dependencies]
python = "^3.12"
aiohttp = "^3.10.11"
-azure-cognitiveservices-speech = "^1.41.1"
+azure-cognitiveservices-speech = "^1.42.0"
numpy = "^2.1.2"
rtclient = {url = "https://github.com/Azure-Samples/aoai-realtime-audio-sdk/releases/download/py%2Fv0.5.2/rtclient-0.5.2-py3-none-any.whl"}
diff --git a/samples/swift/ios/README.md b/samples/swift/ios/README.md
index a84fa92c7..5db3ea6e1 100644
--- a/samples/swift/ios/README.md
+++ b/samples/swift/ios/README.md
@@ -12,7 +12,7 @@ This [speech sample app](./speech-samples) shows various other techniques, curre
## Prerequisites
- A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-- A Mac with Xcode installed as iOS development environment. These samples target iOS versions 11.0 or later.
+- A Mac with Xcode installed as iOS development environment. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-swift) for details on system requirements and setup.
## Get the code for the samples
diff --git a/samples/swift/ios/conversation-transcription/README.md b/samples/swift/ios/conversation-transcription/README.md
index f091db135..e616d1458 100644
--- a/samples/swift/ios/conversation-transcription/README.md
+++ b/samples/swift/ios/conversation-transcription/README.md
@@ -6,7 +6,7 @@ For an introduction to the SDK, please refer to the [quickstart articles for spe
## Prerequisites
- A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-- A Mac with Xcode installed as iOS development environment. The Speech SDK supports iOS versions 11.0 or later.
+- A Mac with Xcode installed as iOS development environment. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-swift) for details on system requirements and setup.
## Get the code for the samples
diff --git a/samples/swift/ios/embedded-speech/README.md b/samples/swift/ios/embedded-speech/README.md
index 6357ca427..59828aa1c 100644
--- a/samples/swift/ios/embedded-speech/README.md
+++ b/samples/swift/ios/embedded-speech/README.md
@@ -6,7 +6,7 @@ For an introduction to the embedded Speech SDK, please refer to the [article for
## Prerequisites
-- A Mac with Xcode installed as an iOS development environment. These samples target iOS versions 13.0 or later.
+- A Mac with Xcode installed as an iOS development environment. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-swift) for details on system requirements and setup.
## Get the Code for the Samples
diff --git a/samples/swift/ios/from-external-microphone/README.md b/samples/swift/ios/from-external-microphone/README.md
index 92bdf16f9..6de548fe9 100644
--- a/samples/swift/ios/from-external-microphone/README.md
+++ b/samples/swift/ios/from-external-microphone/README.md
@@ -6,7 +6,7 @@ recorded from a microphone using AVFoundation APIs.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started).
-* A macOS machine with a microphone and [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) version 9.4.1 or later and [CocoaPods](https://cocoapods.org/) installed.
+* A macOS machine with a microphone and [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-swift) for details on system requirements and setup.
## Get the code for the sample app
diff --git a/samples/swift/macos/speech-keyword-recognition/README.md b/samples/swift/macos/speech-keyword-recognition/README.md
index 9d723b885..79577302c 100644
--- a/samples/swift/macos/speech-keyword-recognition/README.md
+++ b/samples/swift/macos/speech-keyword-recognition/README.md
@@ -6,7 +6,7 @@ Note that a Speech SDK version 1.38.0 or later is required.
## Prerequisites
* A subscription key for the Speech service. See [Try the speech service for free](https://learn.microsoft.com/azure/ai-services/speech-service/overview#get-started).
-* A macOS (10.14 or later) machine with a microphone and [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) version 9.4.1 or later and [CocoaPods](https://cocoapods.org/) installed.
+* A macOS machine with a microphone and [Xcode](https://geo.itunes.apple.com/us/app/xcode/id497799835?mt=12) installed. See the [Speech SDK installation quickstart](https://learn.microsoft.com/azure/ai-services/speech-service/quickstarts/setup-platform?pivots=programming-language-swift) for details on system requirements and setup.
## Get the code for the sample app
diff --git a/samples/swift/macos/speech-keyword-recognition/helloworld/Podfile b/samples/swift/macos/speech-keyword-recognition/helloworld/Podfile
index abcdfb2fd..59f73334e 100644
--- a/samples/swift/macos/speech-keyword-recognition/helloworld/Podfile
+++ b/samples/swift/macos/speech-keyword-recognition/helloworld/Podfile
@@ -1,5 +1,5 @@
target 'helloworld' do
platform :osx, 10.14
- pod 'MicrosoftCognitiveServicesSpeech-macOS', '~> 1.41.1'
+ pod 'MicrosoftCognitiveServicesSpeech-macOS', '~> 1.42.0'
use_frameworks!
end
diff --git a/samples/video-translation/csharp/readme.md b/samples/video-translation/csharp/readme.md
index 63eb34f95..3bc95a9f9 100644
--- a/samples/video-translation/csharp/readme.md
+++ b/samples/video-translation/csharp/readme.md
@@ -44,6 +44,9 @@ Video translation client tool and API sample code
Do not upgrade Flurl to version 4.0 because it does not support NewtonJson for ReceiveJson.
+# Run tool on Windows prerequisite:
+ [Install dotnet 8.0](https://dotnet.microsoft.com/en-us/download/dotnet/8.0)
+
# Command Line Usage
| Description | Command line arguments |
| ------------ | -------------- |
diff --git a/scenarios/cpp/windows/captioning/captioning/captioning.vcxproj b/scenarios/cpp/windows/captioning/captioning/captioning.vcxproj
index 592c8de38..5e0b8c7d6 100644
--- a/scenarios/cpp/windows/captioning/captioning/captioning.vcxproj
+++ b/scenarios/cpp/windows/captioning/captioning/captioning.vcxproj
@@ -159,12 +159,12 @@
-
+
This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
-
+
\ No newline at end of file
diff --git a/scenarios/cpp/windows/captioning/captioning/packages.config b/scenarios/cpp/windows/captioning/captioning/packages.config
index e51eea91b..b60229a48 100644
--- a/scenarios/cpp/windows/captioning/captioning/packages.config
+++ b/scenarios/cpp/windows/captioning/captioning/packages.config
@@ -1,4 +1,4 @@
-
+
\ No newline at end of file
diff --git a/scenarios/csharp/dotnetcore/call-center/call-center/call-center.csproj b/scenarios/csharp/dotnetcore/call-center/call-center/call-center.csproj
index 07c408b5b..ac3081f6f 100644
--- a/scenarios/csharp/dotnetcore/call-center/call-center/call-center.csproj
+++ b/scenarios/csharp/dotnetcore/call-center/call-center/call-center.csproj
@@ -2,7 +2,7 @@
Exe
- net6.0
+ net8.0
enable
x64;x86
diff --git a/scenarios/csharp/dotnetcore/captioning/captioning/captioning.csproj b/scenarios/csharp/dotnetcore/captioning/captioning/captioning.csproj
index 15470d50b..8092df6b9 100644
--- a/scenarios/csharp/dotnetcore/captioning/captioning/captioning.csproj
+++ b/scenarios/csharp/dotnetcore/captioning/captioning/captioning.csproj
@@ -2,13 +2,13 @@
Exe
- net6.0
+ net8.0
enable
x64;x86
-
+
\ No newline at end of file
diff --git a/scenarios/full-duplex-bot/poetry.lock b/scenarios/full-duplex-bot/poetry.lock
index fb26e1053..1aeea3969 100644
--- a/scenarios/full-duplex-bot/poetry.lock
+++ b/scenarios/full-duplex-bot/poetry.lock
@@ -33,17 +33,17 @@ trio = ["trio (>=0.26.1)"]
[[package]]
name = "azure-cognitiveservices-speech"
-version = "1.41.1"
+version = "1.42.0"
description = "Microsoft Cognitive Services Speech SDK for Python"
optional = false
python-versions = ">=3.7"
files = [
- {file = "azure_cognitiveservices_speech-1.41.1-py3-none-macosx_10_14_x86_64.whl", hash = "sha256:ea9d466f236598e37ea3dad1db203a2901ef91b407e435b59d9b22669324074d"},
- {file = "azure_cognitiveservices_speech-1.41.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:0f52f7852965bb2f5cf9aed0d3c6ef58238867bb6f0287eba95e42e1a513dd74"},
- {file = "azure_cognitiveservices_speech-1.41.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:70030c6f1c875895eb985de3775f62349aa8687b6616afa9498466e281f178d3"},
- {file = "azure_cognitiveservices_speech-1.41.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:94ddda0deb3a9fee58a0a781b09ab8ab95401c5daf9bfc9f84ce8134d3a77055"},
- {file = "azure_cognitiveservices_speech-1.41.1-py3-none-win32.whl", hash = "sha256:039eec52c0a549a30658fa24a06d42afc6366c47b03b961c0b6f730fd421293e"},
- {file = "azure_cognitiveservices_speech-1.41.1-py3-none-win_amd64.whl", hash = "sha256:13679949f52f89c263e8b1c6a2d0f384d663917c58b150772cf42b710a01321c"},
+ {file = "azure_cognitiveservices_speech-1.42.0-py3-none-macosx_10_14_x86_64.whl", hash = "sha256:ea9d466f236598e37ea3dad1db203a2901ef91b407e435b59d9b22669324074d"},
+ {file = "azure_cognitiveservices_speech-1.42.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:0f52f7852965bb2f5cf9aed0d3c6ef58238867bb6f0287eba95e42e1a513dd74"},
+ {file = "azure_cognitiveservices_speech-1.42.0-py3-none-manylinux1_x86_64.whl", hash = "sha256:70030c6f1c875895eb985de3775f62349aa8687b6616afa9498466e281f178d3"},
+ {file = "azure_cognitiveservices_speech-1.42.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:94ddda0deb3a9fee58a0a781b09ab8ab95401c5daf9bfc9f84ce8134d3a77055"},
+ {file = "azure_cognitiveservices_speech-1.42.0-py3-none-win32.whl", hash = "sha256:039eec52c0a549a30658fa24a06d42afc6366c47b03b961c0b6f730fd421293e"},
+ {file = "azure_cognitiveservices_speech-1.42.0-py3-none-win_amd64.whl", hash = "sha256:13679949f52f89c263e8b1c6a2d0f384d663917c58b150772cf42b710a01321c"},
]
[[package]]
diff --git a/scenarios/full-duplex-bot/pyproject.toml b/scenarios/full-duplex-bot/pyproject.toml
index d4cadf822..7ddec3825 100644
--- a/scenarios/full-duplex-bot/pyproject.toml
+++ b/scenarios/full-duplex-bot/pyproject.toml
@@ -8,7 +8,7 @@ readme = "README.md"
[tool.poetry.dependencies]
python = "^3.12"
-azure-cognitiveservices-speech = "^1.41.1"
+azure-cognitiveservices-speech = "^1.42.0"
azure-identity = "^1.19.0"
fastapi = "^0.115.5"
jinja2 = "^3.1.5"
diff --git a/scenarios/java/jre/console/captioning/pom.xml b/scenarios/java/jre/console/captioning/pom.xml
index 2619e06fa..ccf0c6134 100644
--- a/scenarios/java/jre/console/captioning/pom.xml
+++ b/scenarios/java/jre/console/captioning/pom.xml
@@ -46,7 +46,7 @@ mvn clean dependency:copy-dependencies
com.microsoft.cognitiveservices.speech
client-sdk
- 1.41.1
+ 1.42.0
\ No newline at end of file
From 3dca873f02a92a1148c291f977c94bc8dbc6c28d Mon Sep 17 00:00:00 2001
From: Brian Mouncer <>
Date: Wed, 8 Jan 2025 19:44:36 -0800
Subject: [PATCH 2/2] fix bad version change from automated per script.
---
samples/java/android/embedded-speech/app/build.gradle | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/samples/java/android/embedded-speech/app/build.gradle b/samples/java/android/embedded-speech/app/build.gradle
index 10ee17e09..606c6238f 100644
--- a/samples/java/android/embedded-speech/app/build.gradle
+++ b/samples/java/android/embedded-speech/app/build.gradle
@@ -25,7 +25,7 @@ dependencies {
implementation fileTree(include: ['*.jar'], dir: 'libs')
// Speech SDK
- implementation 'com.microsoft.cognitiveservices.speech:client-sdk-embedded:1.42.0'
+ implementation 'com.microsoft.cognitiveservices.speech:client-sdk-embedded:1.42.0@aar'
implementation 'androidx.appcompat:appcompat:1.3.1'
implementation 'androidx.constraintlayout:constraintlayout:2.1.0'