change specified voice (#2189)
Co-authored-by: peterpan <[email protected]>
Co-authored-by: Yulin Li <[email protected]>
3 people authored Jan 4, 2024
1 parent fdc0644 commit e7df5f7
Showing 16 changed files with 22 additions and 22 deletions.
2 changes: 1 addition & 1 deletion samples/batch-avatar/python/synthesis.py
@@ -42,7 +42,7 @@ def submit_synthesis():
        'description': DESCRIPTION,
        "textType": "PlainText",
        'synthesisConfig': {
-            "voice": "en-US-JennyNeural",
+            "voice": "en-US-AndrewNeural",
        },
        # Replace with your custom voice name and deployment ID if you want to use custom voice.
        # Multiple voices are supported, the mixture of custom voices and platform voices is allowed.
@@ -38,7 +38,7 @@

    // Create a new synthesis task with plain text
    var newSynthesisUri = await synthesisClient.CreateSynthesisAsync(
-        "en-US-JennyNeural",
+        "en-US-AndrewNeural",
        "sample batch synthesis",
        "sample description",
        sampleScript,
2 changes: 1 addition & 1 deletion samples/batch-synthesis/python/synthesis.py
@@ -46,7 +46,7 @@ def submit_synthesis():
        'description': DESCRIPTION,
        "textType": "PlainText",
        'synthesisConfig': {
-            "voice": "en-US-JennyNeural",
+            "voice": "en-US-AndrewNeural",
        },
        # Replace with your custom voice name and deployment ID if you want to use custom voice.
        # Multiple voices are supported, the mixture of custom voices and platform voices is allowed.
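
For context, a minimal Python sketch of how a batch synthesis request using this platform voice might be submitted. The endpoint path and header are illustrative placeholders and not part of this commit; only the payload fields mirror the ones shown in the diff above.

import requests

SUBSCRIPTION_KEY = "YourSubscriptionKey"  # placeholder
SERVICE_HOST = "https://YourServiceRegion.customvoice.api.speech.microsoft.com"  # placeholder

def submit_synthesis_sketch(text: str) -> str:
    # Payload mirrors the fields touched in this diff; other optional fields are omitted.
    payload = {
        "displayName": "sample batch synthesis",
        "description": "sample description",
        "textType": "PlainText",
        "synthesisConfig": {
            "voice": "en-US-AndrewNeural",  # platform voice selected by this commit
        },
        "inputs": [{"text": text}],
    }
    headers = {"Ocp-Apim-Subscription-Key": SUBSCRIPTION_KEY}
    # The path below is a placeholder; use the batch synthesis endpoint documented for your API version.
    response = requests.post(f"{SERVICE_HOST}/api/texttospeech/batchsynthesis",
                             json=payload, headers=headers)
    response.raise_for_status()
    return response.json()["id"]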
@@ -114,11 +114,11 @@ void SpeechSynthesisWithVoice()
    auto config = SpeechConfig::FromSubscription("YourSubscriptionKey", "YourServiceRegion");

    // Sets the voice name.
-    // e.g. "Microsoft Server Speech Text to Speech Voice (en-US, JennyNeural)".
+    // e.g. "en-US-AndrewNeural".
    // The full list of supported voices can be found here:
    // https://aka.ms/csspeech/voicenames
    // And, you can try GetVoicesAsync method to get all available voices (see SpeechSynthesisGetAvailableVoices() sample below).
-    auto voice = "Microsoft Server Speech Text to Speech Voice (en-US, JennyNeural)";
+    auto voice = "en-US-AndrewNeural";
    config->SetSpeechSynthesisVoiceName(voice);

    // Creates a speech synthesizer for the specified voice, using the default speaker as audio output.
@@ -115,11 +115,11 @@ public static async Task SynthesisWithVoiceAsync()
    var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

    // Sets the voice name.
-    // e.g. "Microsoft Server Speech Text to Speech Voice (en-US, JennyNeural)".
+    // e.g. "en-US-AndrewNeural".
    // The full list of supported voices can be found here:
    // https://aka.ms/csspeech/voicenames
    // And, you can try GetVoicesAsync method to get all available voices (see SynthesisGetAvailableVoicesAsync() sample below).
-    var voice = "Microsoft Server Speech Text to Speech Voice (en-US, JennyNeural)";
+    var voice = "en-US-AndrewNeural";
    config.SpeechSynthesisVoiceName = voice;

    // Creates a speech synthesizer for the specified voice, using the default speaker as audio output.
@@ -971,7 +971,7 @@ public static async Task SynthesizeOnceUseCustomVoiceToSpeakerAsyncSwitchPlatfor
        || details.ErrorCode == CancellationErrorCode.ServiceTimeout
        || details.ErrorDetails.Contains("Error code: 1007"))
    {
-        // Synthesize using a standard platform voice, e.g. en-US-JennyNeural
+        // Synthesize using a standard platform voice, e.g. en-US-AndrewNeural
        synthesisResult = await SynthesizeOnceAsyncInternal("YourSubscriptionKey", "YourServiceRegion", null, "YourPlatformVoiceName");
    }
}
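
The second C# hunk above retries with a standard platform voice when custom-voice synthesis fails. A rough Python sketch of the same fallback pattern, assuming placeholder key, region, deployment ID, and voice names that are not part of this commit:

import azure.cognitiveservices.speech as speechsdk

def synthesize_with_fallback(text: str) -> speechsdk.SpeechSynthesisResult:
    # Try the custom voice first; the deployment ID and custom voice name are placeholders.
    custom_config = speechsdk.SpeechConfig(subscription="YourSubscriptionKey", region="YourServiceRegion")
    custom_config.endpoint_id = "YourDeploymentId"
    custom_config.speech_synthesis_voice_name = "YourCustomVoiceName"
    result = speechsdk.SpeechSynthesizer(speech_config=custom_config, audio_config=None) \
        .speak_text_async(text).get()
    if result.reason != speechsdk.ResultReason.Canceled:
        return result

    # Custom voice failed (for example, the deployment was removed); retry with a platform voice.
    print("Custom voice synthesis canceled:", result.cancellation_details.error_details)
    platform_config = speechsdk.SpeechConfig(subscription="YourSubscriptionKey", region="YourServiceRegion")
    platform_config.speech_synthesis_voice_name = "en-US-AndrewNeural"
    return speechsdk.SpeechSynthesizer(speech_config=platform_config, audio_config=None) \
        .speak_text_async(text).get()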
@@ -256,7 +256,7 @@ public class SpeechSynthesisServerScenarioSample
    public static void SpeechSynthesizeWithPool()
    {
        SynthesisServer server = new SynthesisServer(subscriptionKey, region,
-            "en-US-JennyNeural", SpeechSynthesisOutputFormat.Audio24Khz48KBitRateMonoMp3, concurrency);
+            "en-US-AndrewNeural", SpeechSynthesisOutputFormat.Audio24Khz48KBitRateMonoMp3, concurrency);

        for (var turn = 0; turn < 3; turn++)
        {
@@ -125,7 +125,7 @@ public void onCreateSynthesizerButtonClicked(View v) {
    // Use 24k Hz format for higher quality.
    speechConfig.setSpeechSynthesisOutputFormat(SpeechSynthesisOutputFormat.Raw24Khz16BitMonoPcm);
    // Set voice name.
-    speechConfig.setSpeechSynthesisVoiceName("en-US-JennyNeural");
+    speechConfig.setSpeechSynthesisVoiceName("en-US-AndrewNeural");
    synthesizer = new SpeechSynthesizer(speechConfig, null);
    connection = Connection.fromSpeechSynthesizer(synthesizer);

@@ -126,11 +126,11 @@ public static void synthesisWithVoiceAsync() throws InterruptedException, Execut
    SpeechConfig config = SpeechConfig.fromSubscription("YourSubscriptionKey", "YourServiceRegion");

    // Sets the voice name.
-    // e.g. "Microsoft Server Speech Text to Speech Voice (en-US, JennyNeural)".
+    // e.g. "en-US-AndrewNeural".
    // The full list of supported voices can be found here:
    // https://aka.ms/csspeech/voicenames
    // And, you can try getVoicesAsync method to get all available voices (see synthesisGetAvailableVoicesAsync() sample below).
-    String voice = "en-US-JennyNeural";
+    String voice = "en-US-AndrewNeural";
    config.setSpeechSynthesisVoiceName(voice);

    // Creates a speech synthesizer using the default speaker as audio output.
@@ -90,7 +90,7 @@ public SpeechSynthesisService() {
    /**
     * A thread-safe method to synthesize content
     * @param content The text to synthesize
-     * @param voice The voice name, e.g. en-US-JennyNeural
+     * @param voice The voice name, e.g. en-US-AndrewNeural
     * @return The first byte latency and processing time, in millisecond.
     */
    public long[] synthesis(String content, String voice) {
@@ -149,7 +149,7 @@ public static void synthesisServerScenarioAsync() throws InterruptedException {
    System.out.printf("Turn: %d%n", finalTurn);

    IntStream.range(0, 64).parallel().forEach(i -> {
-        long[] latency = service.synthesis(String.format("today is a nice day. %d%d", finalTurn, i), "en-US-JennyNeural");
+        long[] latency = service.synthesis(String.format("today is a nice day. %d%d", finalTurn, i), "en-US-AndrewNeural");
        if (finalTurn > 0) {
            latencies.add(latency[0]);
            processingTimes.add(latency[1]);
2 changes: 1 addition & 1 deletion samples/js/browser/public/synthesis.html
@@ -317,7 +317,7 @@ <h1 style="font-weight:500;">Speech Speech SDK not found
    request.onload = function() {
        if (request.status >= 200 && request.status < 400) {
            const response = this.response;
-            const defaultVoice = "JennyNeural";
+            const defaultVoice = "AndrewNeural";
            let selectId;
            const data = JSON.parse(response);
            voiceOptions.innerHTML = "";
2 changes: 1 addition & 1 deletion samples/js/browser/synthesis.html
@@ -313,7 +313,7 @@ <h1 style="font-weight:500;">Speech Speech SDK not found
    request.onload = function() {
        if (request.status >= 200 && request.status < 400) {
            const response = this.response;
-            const defaultVoice = "JennyNeural";
+            const defaultVoice = "AndrewNeural";
            let selectId;
            const data = JSON.parse(response);
            voiceOptions.innerHTML = "";
2 changes: 1 addition & 1 deletion samples/js/node/synthesis.js
@@ -15,7 +15,7 @@ export const main = (settings, filename) => {
    // setting the synthesis language, voice name, and output audio format.
    // see https://aka.ms/speech/tts-languages for available languages and voices
    speechConfig.speechSynthesisLanguage = settings.language;
-    speechConfig.speechSynthesisVoiceName = "en-US-JennyNeural";
+    speechConfig.speechSynthesisVoiceName = "en-US-AndrewNeural";
    speechConfig.speechSynthesisOutputFormat = sdk.SpeechSynthesisOutputFormat.Audio16Khz32KBitRateMonoMp3;

    var rl = readline.createInterface({
@@ -109,7 +109,7 @@ class MainActivity : AppCompatActivity() {
    // Use 24k Hz format for higher quality.
    speechConfig?.setSpeechSynthesisOutputFormat(SpeechSynthesisOutputFormat.Raw24Khz16BitMonoPcm)
    // Set voice name.
-    speechConfig?.speechSynthesisVoiceName = "en-US-JennyNeural"
+    speechConfig?.speechSynthesisVoiceName = "en-US-AndrewNeural"
    synthesizer = SpeechSynthesizer(speechConfig, null)
    connection = Connection.fromSpeechSynthesizer(synthesizer)

@@ -149,7 +149,7 @@ - (void)synthesisToSpeaker {
    // https://docs.microsoft.com/azure/cognitive-services/speech-service/language-support#text-to-speech
    speechConfig.speechSynthesisLanguage = @"en-GB";
    // Sets the voice name
-    // e.g. "Microsoft Server Speech Text to Speech Voice (en-US, JennyNeural)".
+    // e.g. "en-US-AndrewNeural".
    // The full list of supported voices can be found here:
    // https://aka.ms/csspeech/voicenames
    // And, you can try getVoices method to get all available voices.
@@ -28,7 +28,7 @@

class LongTextSynthesizer:
    def __init__(self, subscription: str, region: str, language: str = 'english',
-                 voice: str = 'en-US-JennyNeural', parallel_threads: int = 8) -> None:
+                 voice: str = 'en-US-AndrewNeural', parallel_threads: int = 8) -> None:
        self.is_ssml = None
        self.subscription = subscription
        self.region = region
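
A hypothetical usage sketch for the sample's LongTextSynthesizer class, showing the updated default voice being overridden at construction; the module name in the import and anything beyond the constructor arguments shown in this diff are assumptions, not part of the commit.

from long_text_synthesis import LongTextSynthesizer  # module name is an assumption

synthesizer = LongTextSynthesizer(
    subscription="YourSubscriptionKey",  # placeholder
    region="YourServiceRegion",          # placeholder
    language="english",
    voice="en-US-AndrewNeural",          # new default set by this commit; pass another name to override
    parallel_threads=4,
)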
4 changes: 2 additions & 2 deletions samples/python/console/speech_synthesis_sample.py
@@ -87,12 +87,12 @@ def speech_synthesis_with_voice():
    # Creates an instance of a speech config with specified subscription key and service region.
    speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
    # Sets the synthesis voice name.
-    # e.g. "Microsoft Server Speech Text to Speech Voice (en-US, JennyNeural)".
+    # e.g. "en-US-AndrewNeural".
    # The full list of supported voices can be found here:
    # https://aka.ms/csspeech/voicenames
    # And, you can try get_voices_async method to get all available voices.
    # See speech_synthesis_get_available_voices() sample below.
-    voice = "Microsoft Server Speech Text to Speech Voice (en-US, JennyNeural)"
+    voice = "en-US-AndrewNeural"
    speech_config.speech_synthesis_voice_name = voice
    # Creates a speech synthesizer for the specified voice,
    # using the default speaker as audio output.
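
To round out the Python hunk above, a minimal end-to-end sketch that selects the platform voice, speaks one sentence, and lists available voices via get_voices_async; the subscription key and region are placeholders.

import azure.cognitiveservices.speech as speechsdk

speech_config = speechsdk.SpeechConfig(subscription="YourSubscriptionKey", region="YourServiceRegion")
speech_config.speech_synthesis_voice_name = "en-US-AndrewNeural"  # platform voice used across these samples

# Synthesize to the default speaker and check the outcome.
synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config)
result = synthesizer.speak_text_async("Hello, this is a platform voice.").get()
if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
    print("Synthesis finished.")
elif result.reason == speechsdk.ResultReason.Canceled:
    print("Synthesis canceled:", result.cancellation_details.error_details)

# List available voices, as the sample comments suggest via get_voices_async().
voices_result = synthesizer.get_voices_async().get()
for voice in voices_result.voices[:5]:
    print(voice.short_name)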
