initial changes to allow sample js to work with ESM v1.33.1 (#2132)
Showing 25 changed files with 975 additions and 1,064 deletions.
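Because the converted samples use import rather than require(), Node.js has to load each index.js as an ES module. The packaging changes are not rendered in this view, so the snippet below is only an assumed, minimal package.json for running one of the converted quickstarts; the package name here is made up, while "type": "module" and the dependency name are standard, and the SDK version matches the one named in the commit title:

{
  "name": "speech-sdk-quickstart",
  "type": "module",
  "dependencies": {
    "microsoft-cognitiveservices-speech-sdk": "^1.33.1"
  }
}

With "type": "module" set (or the entry file renamed to index.mjs), node index.js runs the ESM version of the sample directly.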
quickstart/javascript/node/conversation-transcription/index.js (110 changes: 53 additions & 57 deletions)
The change removes the CommonJS wrapper, that is the enclosing (function() { "use strict"; ... }()); IIFE and its two require() calls, and pulls in the packages with top-level ES module imports instead; the body of the sample otherwise carries over unchanged. The updated file:

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.

// pull in the required packages.
import * as sdk from "microsoft-cognitiveservices-speech-sdk";
import * as fs from "fs";

// replace with your own subscription key,
// service region (e.g., "centralus"), and
// the name of the file you want to transcribe
// through the conversation transcriber.
var subscriptionKey = "YourSubscriptionKey";
var serviceRegion = "YourServiceRegion"; // e.g., "centralus"
var filename = "YourAudioFile.wav";

// create the push stream we need for the speech sdk.
var pushStream = sdk.AudioInputStream.createPushStream();

// open the file and push it to the push stream.
fs.createReadStream(filename).on('data', function(arrayBuffer) {
    pushStream.write(arrayBuffer.slice());
}).on('end', function() {
    pushStream.close();
});

// we are done with the setup
console.log("Transcribing from: " + filename);
// now create the audio-config pointing to our stream and
// the speech config specifying the language.
var speechConfig = sdk.SpeechConfig.fromSubscription(subscriptionKey, serviceRegion);
var audioConfig = sdk.AudioConfig.fromStreamInput(pushStream);

// create the conversation transcriber.
var transcriber = new sdk.ConversationTranscriber(speechConfig, audioConfig);

transcriber.sessionStarted = function(s, e) {
    console.log("(sessionStarted) SessionId:" + e.sessionId);
};
transcriber.sessionStopped = function(s, e) {
    console.log("(sessionStopped) SessionId:" + e.sessionId);
};
transcriber.canceled = function(s, e) {
    console.log("(canceled) " + e.errorDetails);
};
transcriber.transcribed = function(s, e) {
    console.log("(transcribed) text: " + e.result.text);
    console.log("(transcribed) speakerId: " + e.result.speakerId);
};

// Begin conversation transcription
transcriber.startTranscribingAsync(
    function () {},
    function (err) {
        console.trace("err - starting transcription: " + err);
    }
);
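One note on the converted sample, not part of the commit: it starts transcribing and never stops, so the Node process keeps running after the audio file has been fully pushed. A possible add-on, sketched here under the assumption that the end of the push stream is an acceptable stopping point, is to stop and close the transcriber a little while after pushStream.close() is called; stopTranscribingAsync and close are existing calls on the transcriber, while the 10-second delay is an arbitrary, illustrative value:

// illustrative only: place this after the audio has been fully pushed,
// e.g. inside the 'end' handler above, so final results can arrive before shutdown.
setTimeout(function() {
    transcriber.stopTranscribingAsync(
        function () { transcriber.close(); },
        function (err) { console.trace("err - stopping transcription: " + err); }
    );
}, 10000);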
quickstart/javascript/node/meeting-transcription/index.js (163 changes: 79 additions & 84 deletions)
The same conversion is applied here: the IIFE wrapper and the require() calls are dropped in favor of top-level import statements, and the body is otherwise carried over. The updated file:

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.

// pull in the required packages.
import * as sdk from "microsoft-cognitiveservices-speech-sdk";
import * as fs from "fs";

// replace with your own subscription key,
// service region (e.g., "centralus"), and
// the name of the file you want to transcribe
// through the meeting transcriber.
var subscriptionKey = "YourSubscriptionKey";
var serviceRegion = "YourServiceRegion"; // e.g., "centralus"
var filename = "YourAudioFile.wav"; // 8-channel audio

// create the push stream we need for the speech sdk.
var pushStream = sdk.AudioInputStream.createPushStream(sdk.AudioStreamFormat.getWaveFormatPCM(16000, 16, 8));

// open the file and push it to the push stream.
fs.createReadStream(filename).on('data', function(arrayBuffer) {
    pushStream.write(arrayBuffer.slice());
}).on('end', function() {
    pushStream.close();
});

// we are done with the setup
console.log("Transcribing from: " + filename);
// now create the audio-config pointing to our stream and
// the speech config specifying the language.
var speechTranslationConfig = sdk.SpeechTranslationConfig.fromSubscription(subscriptionKey, serviceRegion);
var audioConfig = sdk.AudioConfig.fromStreamInput(pushStream);

// setting the recognition language to English.
speechTranslationConfig.speechRecognitionLanguage = "en-US";

// create the meeting object tracking participants
var meeting = sdk.Meeting.createMeetingAsync(speechTranslationConfig, "myMeeting");

// create the meeting transcriber.
var transcriber = new sdk.MeetingTranscriber(audioConfig);

// attach the transcriber to the meeting
transcriber.joinMeetingAsync(meeting,
    function () {
        // add first participant with voice signature from enrollment step
        var voiceSignatureUser1 = "{" +
            "Version: 0," +
            "Tag: \"<<VOICE_TAG_HERE>>\"," +
            "Data: \"<<VOICE_DATA_HERE>>\"" +
            "}";
        var user1 = sdk.Participant.From("user1@example.com", "en-us", voiceSignatureUser1);
        meeting.addParticipantAsync(user1,
            function () {
                // add second participant with voice signature from enrollment step
                var voiceSignatureUser2 = "{" +
                    "Version: 0," +
                    "Tag: \"<<VOICE_TAG_HERE>>\"," +
                    "Data: \"<<VOICE_DATA_HERE>>\"" +
                    "}";
                var user2 = sdk.Participant.From("user2@example.com", "en-us", voiceSignatureUser2);
                meeting.addParticipantAsync(user2,
                    function () {
                        transcriber.sessionStarted = function(s, e) {
                            console.log("(sessionStarted)");
                        };
                        transcriber.sessionStopped = function(s, e) {
                            console.log("(sessionStopped)");
                        };
                        transcriber.canceled = function(s, e) {
                            console.log("(canceled)");
                        };
                        transcriber.transcribed = function(s, e) {
                            console.log("(transcribed) text: " + e.result.text);
                            console.log("(transcribed) speakerId: " + e.result.speakerId);
                        };

                        // Begin meeting transcription
                        transcriber.startTranscribingAsync(
                            function () { },
                            function (err) {
                                console.trace("err - starting transcription: " + err);
                            });
                    },
                    function (err) {
                        console.trace("err - adding user2: " + err);
                    });
            },
            function (err) {
                console.trace("err - adding user1: " + err);
            });
    },
    function (err) {
        console.trace("err - " + err);
    });
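The voiceSignatureUser1 and voiceSignatureUser2 values above are placeholder strings; the real Tag and Data fields come from a separate voice-signature enrollment step and are simply passed to sdk.Participant.From as a string. Purely as an illustration, and using a hypothetical helper name, the same kind of string can be produced from an object with JSON.stringify instead of manual concatenation, which avoids hand-escaping the quotes (the field names are the ones the sample already uses):

// hypothetical helper: build a voice signature string from its parts
function voiceSignatureFrom(tag, data) {
    return JSON.stringify({ Version: 0, Tag: tag, Data: data });
}

var voiceSignatureUser1 = voiceSignatureFrom("<<VOICE_TAG_HERE>>", "<<VOICE_DATA_HERE>>");
var voiceSignatureUser2 = voiceSignatureFrom("<<VOICE_TAG_HERE>>", "<<VOICE_DATA_HERE>>");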