From 0271231a106d689804b94e639b76e87b4a5f9b5f Mon Sep 17 00:00:00 2001 From: Shir Goldberg <3937986+shirgoldbird@users.noreply.github.com> Date: Mon, 3 Apr 2023 09:43:47 -0700 Subject: [PATCH] update endpointing types to match new API Endpointing now accepts either a boolean or a number representing the amount of time to wait before sending back a message with speech_final. --- src/types/liveTranscriptionOptions.ts | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/types/liveTranscriptionOptions.ts b/src/types/liveTranscriptionOptions.ts index 7463fbf3..08899a6a 100644 --- a/src/types/liveTranscriptionOptions.ts +++ b/src/types/liveTranscriptionOptions.ts @@ -155,9 +155,10 @@ export type LiveTranscriptionOptions = { * that no additional data will improve its prediction, so it immediately * finalizes the result for the processed time range and returns the * transcript with a speech_final parameter set to true. - * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeStreamingAudio/properties/endpointing + * @see https://developers.deepgram.com/api-reference/speech-recognition-api#operation/transcribeStreamingAudio/properties/endpointing + */ - endpointing?: boolean; + endpointing?: boolean | number; /** * Length of time in milliseconds of silence that voice activation detection * (VAD) will use to detect that a speaker has finished speaking. Used when