diff --git a/Deepgram.Dev.sln b/Deepgram.Dev.sln
index fd6ca7c9..11eb422a 100644
--- a/Deepgram.Dev.sln
+++ b/Deepgram.Dev.sln
@@ -7,8 +7,6 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Deepgram", "Deepgram\Deepgr
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Deepgram.Tests", "Deepgram.Tests\Deepgram.Tests.csproj", "{12C80273-08DD-494C-B06D-DFC6D40B1D95}"
EndProject
-Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "PreRecorded", "examples\prerecorded\file\PreRecorded.csproj", "{70B63CBA-1130-46D1-A022-6CD31C37C60E}"
-EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Speak", "examples\speak\file\hello-world\Speak.csproj", "{C3AA63DB-4555-4BEF-B2DD-89A3B19A265B}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Streaming", "examples\streaming\file\Streaming.csproj", "{FD8507CC-EECF-4750-81AF-3CF8E536C007}"
@@ -35,6 +33,70 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "url", "url", "{0414D1CF-79F
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Streaming", "examples\streaming\microphone\Streaming.csproj", "{74335799-3B43-432C-ACD9-FBF2AB32A64A}"
EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "tests", "tests", "{9D2B336D-17F9-41D5-A4E5-F3EDA7F496A5}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "edge_cases", "edge_cases", "{1280E66D-A375-422A-ACB4-48F17E9C190E}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "expected_failures", "expected_failures", "{5B1C7C37-BAE9-4027-927C-38B2393B3F43}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "keepalive", "keepalive", "{7F7328CF-D932-4D0F-B832-9CCE65E7B308}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "exercise_timeout", "exercise_timeout", "{3AC17A9C-30BB-4298-8E5A-4FAE73189821}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ExerciseTimeout", "tests\expected_failures\exercise_timeout\ExerciseTimeout.csproj", "{E19BE681-F801-444F-A3BD-8F1207FB3982}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "KeepAlive", "tests\edge_cases\keepalive\KeepAlive.csproj", "{D3F886B5-1C28-44AF-A416-2BBE660D5EC4}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "file", "file", "{FA17B9A9-F542-4F8A-AF4B-48404D009967}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "intent", "intent", "{02F10F52-5DFF-4CEC-8AAF-AFA16DEE27F1}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "summary", "summary", "{287650F3-B83F-473E-8251-226069F797D7}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "topic", "topic", "{B1D51F16-688B-4940-833A-8F38DC212292}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "sentiment", "sentiment", "{B95CE5C6-977C-42A2-848A-5C6C99496F8A}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "PreRecorded", "examples\prerecorded\file\PreRecorded.csproj", "{9C720D12-AF17-40D7-8F92-7D96077BC7FB}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "PreRecorded", "examples\prerecorded\intent\PreRecorded.csproj", "{172FAC5E-32F0-4377-A92F-CFACDB294561}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "PreRecorded", "examples\prerecorded\sentiment\PreRecorded.csproj", "{ABBF01EA-1015-4C16-A68C-F30A3904799B}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "PreRecorded", "examples\prerecorded\summary\PreRecorded.csproj", "{27C58ED4-DB4E-4A38-8D0E-D14854FE1814}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "PreRecorded", "examples\prerecorded\topic\PreRecorded.csproj", "{C80145E0-5E37-4FA6-83FD-C9264876CAAF}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "PreRecorded", "examples\prerecorded\url\PreRecorded.csproj", "{1E5D1117-C170-4E6A-BC41-F7F8F46F2C59}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "analyze", "analyze", "{83D6D1F7-97E1-4E35-A0AA-E3ED5977EA72}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "intent", "intent", "{F476F3D0-65B2-4F0C-AA93-1E0049C7135A}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "sentiment", "sentiment", "{0B951DDD-626E-4A4D-9379-17AF77A3531E}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "topic", "topic", "{D6F102B6-FEDE-4D9A-A3BB-7BF26F26C3C4}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "summary", "summary", "{92BF7997-2628-4434-A567-6C72352200A4}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Analyze", "examples\analyze\sentiment\Analyze.csproj", "{B3A18F05-F1A4-4CC5-99A2-C77688E52D5E}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Analyze", "examples\analyze\intent\Analyze.csproj", "{7258AEA6-720B-4554-9661-C40F381CA1C0}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Analyze", "examples\analyze\summary\Analyze.csproj", "{DC4F480C-BC9E-4263-80D4-62CCCAF131E3}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Analyze", "examples\analyze\topic\Analyze.csproj", "{1785B862-DAB8-45DC-9EDC-E2D9021CEAA7}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "woodchuck", "woodchuck", "{1826908E-F3F3-4146-A2A1-5B3D71CB7F3D}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Speak", "examples\speak\file\woodchuck\Speak.csproj", "{BE44FCAC-FBFF-4D70-BE52-B181049D1F70}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "manage", "manage", "{FA5723B3-74E9-4221-80EF-4833C1C3DD9F}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "balances", "balances", "{7CD5D816-0F30-45D7-9C9F-872E67F6A711}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Manage", "examples\manage\balances\Manage.csproj", "{2BF14C9B-755E-41FD-BB59-D6A82E0EFC51}"
+EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
@@ -49,10 +111,6 @@ Global
{12C80273-08DD-494C-B06D-DFC6D40B1D95}.Debug|Any CPU.Build.0 = Debug|Any CPU
{12C80273-08DD-494C-B06D-DFC6D40B1D95}.Release|Any CPU.ActiveCfg = Release|Any CPU
{12C80273-08DD-494C-B06D-DFC6D40B1D95}.Release|Any CPU.Build.0 = Release|Any CPU
- {70B63CBA-1130-46D1-A022-6CD31C37C60E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
- {70B63CBA-1130-46D1-A022-6CD31C37C60E}.Debug|Any CPU.Build.0 = Debug|Any CPU
- {70B63CBA-1130-46D1-A022-6CD31C37C60E}.Release|Any CPU.ActiveCfg = Release|Any CPU
- {70B63CBA-1130-46D1-A022-6CD31C37C60E}.Release|Any CPU.Build.0 = Release|Any CPU
{C3AA63DB-4555-4BEF-B2DD-89A3B19A265B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{C3AA63DB-4555-4BEF-B2DD-89A3B19A265B}.Debug|Any CPU.Build.0 = Debug|Any CPU
{C3AA63DB-4555-4BEF-B2DD-89A3B19A265B}.Release|Any CPU.ActiveCfg = Release|Any CPU
@@ -69,12 +127,67 @@ Global
{74335799-3B43-432C-ACD9-FBF2AB32A64A}.Debug|Any CPU.Build.0 = Debug|Any CPU
{74335799-3B43-432C-ACD9-FBF2AB32A64A}.Release|Any CPU.ActiveCfg = Release|Any CPU
{74335799-3B43-432C-ACD9-FBF2AB32A64A}.Release|Any CPU.Build.0 = Release|Any CPU
+ {E19BE681-F801-444F-A3BD-8F1207FB3982}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {E19BE681-F801-444F-A3BD-8F1207FB3982}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {E19BE681-F801-444F-A3BD-8F1207FB3982}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {E19BE681-F801-444F-A3BD-8F1207FB3982}.Release|Any CPU.Build.0 = Release|Any CPU
+ {D3F886B5-1C28-44AF-A416-2BBE660D5EC4}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {D3F886B5-1C28-44AF-A416-2BBE660D5EC4}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {D3F886B5-1C28-44AF-A416-2BBE660D5EC4}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {D3F886B5-1C28-44AF-A416-2BBE660D5EC4}.Release|Any CPU.Build.0 = Release|Any CPU
+ {9C720D12-AF17-40D7-8F92-7D96077BC7FB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {9C720D12-AF17-40D7-8F92-7D96077BC7FB}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {9C720D12-AF17-40D7-8F92-7D96077BC7FB}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {9C720D12-AF17-40D7-8F92-7D96077BC7FB}.Release|Any CPU.Build.0 = Release|Any CPU
+ {172FAC5E-32F0-4377-A92F-CFACDB294561}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {172FAC5E-32F0-4377-A92F-CFACDB294561}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {172FAC5E-32F0-4377-A92F-CFACDB294561}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {172FAC5E-32F0-4377-A92F-CFACDB294561}.Release|Any CPU.Build.0 = Release|Any CPU
+ {ABBF01EA-1015-4C16-A68C-F30A3904799B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {ABBF01EA-1015-4C16-A68C-F30A3904799B}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {ABBF01EA-1015-4C16-A68C-F30A3904799B}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {ABBF01EA-1015-4C16-A68C-F30A3904799B}.Release|Any CPU.Build.0 = Release|Any CPU
+ {27C58ED4-DB4E-4A38-8D0E-D14854FE1814}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {27C58ED4-DB4E-4A38-8D0E-D14854FE1814}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {27C58ED4-DB4E-4A38-8D0E-D14854FE1814}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {27C58ED4-DB4E-4A38-8D0E-D14854FE1814}.Release|Any CPU.Build.0 = Release|Any CPU
+ {C80145E0-5E37-4FA6-83FD-C9264876CAAF}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {C80145E0-5E37-4FA6-83FD-C9264876CAAF}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {C80145E0-5E37-4FA6-83FD-C9264876CAAF}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {C80145E0-5E37-4FA6-83FD-C9264876CAAF}.Release|Any CPU.Build.0 = Release|Any CPU
+ {1E5D1117-C170-4E6A-BC41-F7F8F46F2C59}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {1E5D1117-C170-4E6A-BC41-F7F8F46F2C59}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {1E5D1117-C170-4E6A-BC41-F7F8F46F2C59}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {1E5D1117-C170-4E6A-BC41-F7F8F46F2C59}.Release|Any CPU.Build.0 = Release|Any CPU
+ {B3A18F05-F1A4-4CC5-99A2-C77688E52D5E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {B3A18F05-F1A4-4CC5-99A2-C77688E52D5E}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {B3A18F05-F1A4-4CC5-99A2-C77688E52D5E}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {B3A18F05-F1A4-4CC5-99A2-C77688E52D5E}.Release|Any CPU.Build.0 = Release|Any CPU
+ {7258AEA6-720B-4554-9661-C40F381CA1C0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {7258AEA6-720B-4554-9661-C40F381CA1C0}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {7258AEA6-720B-4554-9661-C40F381CA1C0}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {7258AEA6-720B-4554-9661-C40F381CA1C0}.Release|Any CPU.Build.0 = Release|Any CPU
+ {DC4F480C-BC9E-4263-80D4-62CCCAF131E3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {DC4F480C-BC9E-4263-80D4-62CCCAF131E3}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {DC4F480C-BC9E-4263-80D4-62CCCAF131E3}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {DC4F480C-BC9E-4263-80D4-62CCCAF131E3}.Release|Any CPU.Build.0 = Release|Any CPU
+ {1785B862-DAB8-45DC-9EDC-E2D9021CEAA7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {1785B862-DAB8-45DC-9EDC-E2D9021CEAA7}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {1785B862-DAB8-45DC-9EDC-E2D9021CEAA7}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {1785B862-DAB8-45DC-9EDC-E2D9021CEAA7}.Release|Any CPU.Build.0 = Release|Any CPU
+ {BE44FCAC-FBFF-4D70-BE52-B181049D1F70}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {BE44FCAC-FBFF-4D70-BE52-B181049D1F70}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {BE44FCAC-FBFF-4D70-BE52-B181049D1F70}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {BE44FCAC-FBFF-4D70-BE52-B181049D1F70}.Release|Any CPU.Build.0 = Release|Any CPU
+ {2BF14C9B-755E-41FD-BB59-D6A82E0EFC51}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {2BF14C9B-755E-41FD-BB59-D6A82E0EFC51}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {2BF14C9B-755E-41FD-BB59-D6A82E0EFC51}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {2BF14C9B-755E-41FD-BB59-D6A82E0EFC51}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
GlobalSection(NestedProjects) = preSolution
- {70B63CBA-1130-46D1-A022-6CD31C37C60E} = {0414D1CF-79FB-4C5B-BF2B-88C3C1AA4C32}
{C3AA63DB-4555-4BEF-B2DD-89A3B19A265B} = {AE6FFA55-DD91-4BC1-AFDF-96F64B5221CD}
{FD8507CC-EECF-4750-81AF-3CF8E536C007} = {5E75479B-B84A-4386-9D3E-69CFB30B24C6}
{E2E3000D-FBBA-450E-A4E0-3542B38ADAFD} = {C673DFD1-528A-4BAE-94E6-02EF058AC363}
@@ -86,6 +199,37 @@ Global
{AE6FFA55-DD91-4BC1-AFDF-96F64B5221CD} = {50BA802D-603E-4BD2-9A3E-AFDABC3AF43C}
{0414D1CF-79FB-4C5B-BF2B-88C3C1AA4C32} = {77ACBABB-CF6B-4929-957C-480E29646DFD}
{74335799-3B43-432C-ACD9-FBF2AB32A64A} = {4D6C28C1-4D3F-4CFC-AF76-A389F6B00EC2}
+ {1280E66D-A375-422A-ACB4-48F17E9C190E} = {9D2B336D-17F9-41D5-A4E5-F3EDA7F496A5}
+ {5B1C7C37-BAE9-4027-927C-38B2393B3F43} = {9D2B336D-17F9-41D5-A4E5-F3EDA7F496A5}
+ {7F7328CF-D932-4D0F-B832-9CCE65E7B308} = {1280E66D-A375-422A-ACB4-48F17E9C190E}
+ {3AC17A9C-30BB-4298-8E5A-4FAE73189821} = {5B1C7C37-BAE9-4027-927C-38B2393B3F43}
+ {E19BE681-F801-444F-A3BD-8F1207FB3982} = {3AC17A9C-30BB-4298-8E5A-4FAE73189821}
+ {D3F886B5-1C28-44AF-A416-2BBE660D5EC4} = {7F7328CF-D932-4D0F-B832-9CCE65E7B308}
+ {FA17B9A9-F542-4F8A-AF4B-48404D009967} = {77ACBABB-CF6B-4929-957C-480E29646DFD}
+ {02F10F52-5DFF-4CEC-8AAF-AFA16DEE27F1} = {77ACBABB-CF6B-4929-957C-480E29646DFD}
+ {287650F3-B83F-473E-8251-226069F797D7} = {77ACBABB-CF6B-4929-957C-480E29646DFD}
+ {B1D51F16-688B-4940-833A-8F38DC212292} = {77ACBABB-CF6B-4929-957C-480E29646DFD}
+ {B95CE5C6-977C-42A2-848A-5C6C99496F8A} = {77ACBABB-CF6B-4929-957C-480E29646DFD}
+ {9C720D12-AF17-40D7-8F92-7D96077BC7FB} = {FA17B9A9-F542-4F8A-AF4B-48404D009967}
+ {172FAC5E-32F0-4377-A92F-CFACDB294561} = {02F10F52-5DFF-4CEC-8AAF-AFA16DEE27F1}
+ {ABBF01EA-1015-4C16-A68C-F30A3904799B} = {B95CE5C6-977C-42A2-848A-5C6C99496F8A}
+ {27C58ED4-DB4E-4A38-8D0E-D14854FE1814} = {287650F3-B83F-473E-8251-226069F797D7}
+ {C80145E0-5E37-4FA6-83FD-C9264876CAAF} = {B1D51F16-688B-4940-833A-8F38DC212292}
+ {1E5D1117-C170-4E6A-BC41-F7F8F46F2C59} = {0414D1CF-79FB-4C5B-BF2B-88C3C1AA4C32}
+ {83D6D1F7-97E1-4E35-A0AA-E3ED5977EA72} = {C673DFD1-528A-4BAE-94E6-02EF058AC363}
+ {F476F3D0-65B2-4F0C-AA93-1E0049C7135A} = {83D6D1F7-97E1-4E35-A0AA-E3ED5977EA72}
+ {0B951DDD-626E-4A4D-9379-17AF77A3531E} = {83D6D1F7-97E1-4E35-A0AA-E3ED5977EA72}
+ {D6F102B6-FEDE-4D9A-A3BB-7BF26F26C3C4} = {83D6D1F7-97E1-4E35-A0AA-E3ED5977EA72}
+ {92BF7997-2628-4434-A567-6C72352200A4} = {83D6D1F7-97E1-4E35-A0AA-E3ED5977EA72}
+ {B3A18F05-F1A4-4CC5-99A2-C77688E52D5E} = {0B951DDD-626E-4A4D-9379-17AF77A3531E}
+ {7258AEA6-720B-4554-9661-C40F381CA1C0} = {F476F3D0-65B2-4F0C-AA93-1E0049C7135A}
+ {DC4F480C-BC9E-4263-80D4-62CCCAF131E3} = {92BF7997-2628-4434-A567-6C72352200A4}
+ {1785B862-DAB8-45DC-9EDC-E2D9021CEAA7} = {D6F102B6-FEDE-4D9A-A3BB-7BF26F26C3C4}
+ {1826908E-F3F3-4146-A2A1-5B3D71CB7F3D} = {50BA802D-603E-4BD2-9A3E-AFDABC3AF43C}
+ {BE44FCAC-FBFF-4D70-BE52-B181049D1F70} = {1826908E-F3F3-4146-A2A1-5B3D71CB7F3D}
+ {FA5723B3-74E9-4221-80EF-4833C1C3DD9F} = {C673DFD1-528A-4BAE-94E6-02EF058AC363}
+ {7CD5D816-0F30-45D7-9C9F-872E67F6A711} = {FA5723B3-74E9-4221-80EF-4833C1C3DD9F}
+ {2BF14C9B-755E-41FD-BB59-D6A82E0EFC51} = {7CD5D816-0F30-45D7-9C9F-872E67F6A711}
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {8D4ABC6D-7126-4EE2-9303-43A954616B2A}
diff --git a/Deepgram/Clients/Live/v1/Client.cs b/Deepgram/Clients/Live/v1/Client.cs
index c4cda267..76963872 100644
--- a/Deepgram/Clients/Live/v1/Client.cs
+++ b/Deepgram/Clients/Live/v1/Client.cs
@@ -2,9 +2,6 @@
// Use of this source code is governed by a MIT license that can be found in the LICENSE file.
// SPDX-License-Identifier: MIT
-using System;
-using System.Diagnostics.Tracing;
-using System.Net.WebSockets;
using Deepgram.Models.Authenticate.v1;
using Deepgram.Models.Live.v1;
@@ -19,7 +16,7 @@ public class Client : Attribute, IDisposable
private readonly DeepgramWsClientOptions _deepgramClientOptions;
private ClientWebSocket? _clientWebSocket;
- private CancellationTokenSource _cancellationTokenSource;
+ private CancellationTokenSource? _cancellationTokenSource;
#endregion
/// Required DeepgramApiKey
@@ -396,7 +393,6 @@ internal void ProcessDataReceived(WebSocketReceiveResult result, MemoryStream ms
switch (val)
{
case LiveType.Open:
- //var openResponse = new ResponseEvent<OpenResponse>(data.Deserialize<OpenResponse>());
var openResponse = data.Deserialize<OpenResponse>();
if (_openReceived == null)
{
@@ -412,11 +408,9 @@ internal void ProcessDataReceived(WebSocketReceiveResult result, MemoryStream ms
}
Log.Debug("ProcessDataReceived", $"Invoking OpenResponse. event: {openResponse}");
- _openReceived.Invoke(null, openResponse);
- //InvokeResponseReceived(_openReceived, openResponse);
+ InvokeParallel(_openReceived, openResponse);
break;
case LiveType.Results:
- //var eventResponse = new ResponseEvent<ResultResponse>(data.Deserialize<ResultResponse>());
var resultResponse = data.Deserialize<ResultResponse>();
if (_resultsReceived == null)
{
@@ -424,7 +418,7 @@ internal void ProcessDataReceived(WebSocketReceiveResult result, MemoryStream ms
Log.Verbose("ProcessDataReceived", "LEAVE");
return;
}
- if ( resultResponse == null)
+ if (resultResponse == null)
{
Log.Warning("ProcessDataReceived", "ResultResponse is invalid");
Log.Verbose("ProcessDataReceived", "LEAVE");
@@ -432,11 +426,9 @@ internal void ProcessDataReceived(WebSocketReceiveResult result, MemoryStream ms
}
Log.Debug("ProcessDataReceived", $"Invoking ResultsResponse. event: {resultResponse}");
- _resultsReceived.Invoke(null, resultResponse);
- //InvokeResponseReceived(_resultsReceived, eventResponse);
+ InvokeParallel(_resultsReceived, resultResponse);
break;
case LiveType.Metadata:
- //var metadataResponse = new ResponseEvent<MetadataResponse>(data.Deserialize<MetadataResponse>());
var metadataResponse = data.Deserialize<MetadataResponse>();
if (_metadataReceived == null)
{
@@ -452,11 +444,9 @@ internal void ProcessDataReceived(WebSocketReceiveResult result, MemoryStream ms
}
Log.Debug("ProcessDataReceived", $"Invoking MetadataResponse. event: {metadataResponse}");
- _metadataReceived.Invoke(null, metadataResponse);
- //InvokeResponseReceived(_metadataReceived, metadataResponse);
+ InvokeParallel(_metadataReceived, metadataResponse);
break;
case LiveType.UtteranceEnd:
- //var utteranceEndResponse = new ResponseEvent<UtteranceEndResponse>(data.Deserialize<UtteranceEndResponse>());
var utteranceEndResponse = data.Deserialize<UtteranceEndResponse>();
if (_utteranceEndReceived == null)
{
@@ -472,11 +462,9 @@ internal void ProcessDataReceived(WebSocketReceiveResult result, MemoryStream ms
}
Log.Debug("ProcessDataReceived", $"Invoking UtteranceEndResponse. event: {utteranceEndResponse}");
- _utteranceEndReceived.Invoke(null, utteranceEndResponse);
- //InvokeResponseReceived(_utteranceEndReceived, utteranceEndResponse);
+ InvokeParallel(_utteranceEndReceived, utteranceEndResponse);
break;
case LiveType.SpeechStarted:
- //var speechStartedResponse = new ResponseEvent<SpeechStartedResponse>(data.Deserialize<SpeechStartedResponse>());
var speechStartedResponse = data.Deserialize<SpeechStartedResponse>();
if (_speechStartedReceived == null)
{
@@ -492,11 +480,9 @@ internal void ProcessDataReceived(WebSocketReceiveResult result, MemoryStream ms
}
Log.Debug("ProcessDataReceived", $"Invoking SpeechStartedResponse. event: {speechStartedResponse}");
- _speechStartedReceived.Invoke(null, speechStartedResponse);
- //InvokeResponseReceived(_speechStartedReceived, speechStartedResponse);
+ InvokeParallel(_speechStartedReceived, speechStartedResponse);
break;
case LiveType.Close:
- //var closeResponse = new ResponseEvent<CloseResponse>(data.Deserialize<CloseResponse>());
var closeResponse = data.Deserialize<CloseResponse>();
if (_closeReceived == null)
{
@@ -512,11 +498,9 @@ internal void ProcessDataReceived(WebSocketReceiveResult result, MemoryStream ms
}
Log.Debug("ProcessDataReceived", $"Invoking CloseResponse. event: {closeResponse}");
- _closeReceived.Invoke(null, closeResponse);
- //InvokeResponseReceived(_closeReceived, closeResponse);
+ InvokeParallel(_closeReceived, closeResponse);
break;
case LiveType.Error:
- //var errorResponse = new ResponseEvent<ErrorResponse>(data.Deserialize<ErrorResponse>());
var errorResponse = data.Deserialize<ErrorResponse>();
if (_errorReceived == null)
{
@@ -532,8 +516,7 @@ internal void ProcessDataReceived(WebSocketReceiveResult result, MemoryStream ms
}
Log.Debug("ProcessDataReceived", $"Invoking ErrorResponse. event: {errorResponse}");
- _errorReceived.Invoke(null, errorResponse);
- //InvokeResponseReceived(_errorReceived, errorResponse);
+ InvokeParallel(_errorReceived, errorResponse);
break;
default:
if (_unhandledReceived == null)
@@ -543,14 +526,12 @@ internal void ProcessDataReceived(WebSocketReceiveResult result, MemoryStream ms
return;
}
- //var unhandledResponse = new ResponseEvent<UnhandledResponse>(data.Deserialize<UnhandledResponse>());
var unhandledResponse = new UnhandledResponse();
unhandledResponse.Type = LiveType.Unhandled;
unhandledResponse.Raw = response;
Log.Debug("ProcessDataReceived", $"Invoking UnhandledResponse. event: {unhandledResponse}");
- _unhandledReceived.Invoke(null, unhandledResponse);
- //InvokeResponseReceived(_unhandledReceived, unhandledResponse);
+ InvokeParallel(_unhandledReceived, unhandledResponse);
break;
}
@@ -627,12 +608,18 @@ await _clientWebSocket.CloseOutputAsync(
// Always request cancellation to the local token source, if some function has been called without a token
if (cancellationToken != null)
{
- Log.Debug("Stop", "Cancelling provided cancellation token...");
+ Log.Debug("Stop", "Cancelling provided token...");
cancellationToken.Cancel();
}
Log.Debug("Stop", "Disposing WebSocket connection...");
- _cancellationTokenSource.Cancel();
+ if (_cancellationTokenSource != null)
+ {
+ Log.Debug("Stop", "Cancelling native token...");
+ _cancellationTokenSource.Cancel();
+ _cancellationTokenSource.Dispose();
+ _cancellationTokenSource = null;
+ }
Log.Debug("Stop", "Succeeded");
Log.Verbose("Stop", "LEAVE");
@@ -684,6 +671,28 @@ internal static Uri GetUri(DeepgramWsClientOptions options, LiveSchema parameter
return new Uri($"{options.BaseAddress}/{UriSegments.LISTEN}?{queryString}");
}
+
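+ // Invoke every subscribed handler in parallel on the thread pool so a slow or
+ // faulty subscriber cannot stall the WebSocket receive loop; exceptions are caught and logged.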
+ internal void InvokeParallel<T>(EventHandler<T> eventHandler, T e)
+ {
+ if (eventHandler != null)
+ {
+ try
+ {
+ Parallel.ForEach(
+ eventHandler.GetInvocationList().Cast<EventHandler<T>>(),
+ (handler) =>
+ handler(null, e));
+ }
+ catch (AggregateException ae)
+ {
+ Log.Error("InvokeParallel", $"AggregateException occurred in one or more event handlers: {ae}");
+ }
+ catch (Exception ex)
+ {
+ Log.Error("InvokeParallel", $"Exception occurred in event handler: {ex}");
+ }
+ }
+ }
#endregion
#region Dispose
@@ -704,6 +713,7 @@ public void Dispose()
_cancellationTokenSource.Cancel();
}
_cancellationTokenSource.Dispose();
+ _cancellationTokenSource = null;
}
if (_sendChannel != null)
@@ -711,19 +721,13 @@ public void Dispose()
_sendChannel.Writer.Complete();
}
- _clientWebSocket.Dispose();
+ if (_clientWebSocket != null)
+ {
+ _clientWebSocket.Dispose();
+ _clientWebSocket = null;
+ }
+
GC.SuppressFinalize(this);
}
-
- //internal void InvokeResponseReceived<T>(EventHandler<T> eventHandler, ResponseEvent<T> e)
- //{
- // if (eventHandler != null)
- // {
- // Parallel.ForEach(
- // eventHandler.GetInvocationList().Cast<EventHandler<T>>(),
- // (handler) =>
- // handler(null, e));
- // }
- //}
#endregion
}
diff --git a/Deepgram/GlobalUsings.cs b/Deepgram/GlobalUsings.cs
index a33d6c16..d3864d06 100644
--- a/Deepgram/GlobalUsings.cs
+++ b/Deepgram/GlobalUsings.cs
@@ -17,6 +17,5 @@
global using Deepgram.Logger;
global using Deepgram.Utilities;
global using Microsoft.Extensions.DependencyInjection;
-global using Microsoft.Extensions.Logging;
global using Polly;
global using Polly.Contrib.WaitAndRetry;
diff --git a/Deepgram/Logger/Log.cs b/Deepgram/Logger/Log.cs
index 04482baa..deaaff52 100644
--- a/Deepgram/Logger/Log.cs
+++ b/Deepgram/Logger/Log.cs
@@ -25,14 +25,14 @@ public static Serilog.ILogger Initialize(LogLevel level = LogLevel.Information,
{
instance = new LoggerConfiguration()
.MinimumLevel.Is((Serilog.Events.LogEventLevel) level)
- .WriteTo.Console()
- .WriteTo.File(filename)
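+ // include a timestamp and the log level in each console/file entry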
+ .WriteTo.Console(outputTemplate: "{Timestamp:yyyy-MM-dd HH:mm:ss.fff} [{Level}] {Message}{NewLine}{Exception}")
+ .WriteTo.File(filename, outputTemplate: "{Timestamp:yyyy-MM-dd HH:mm:ss.fff} [{Level}] {Message}{NewLine}{Exception}")
.CreateLogger();
return instance;
}
instance = new LoggerConfiguration()
.MinimumLevel.Is((Serilog.Events.LogEventLevel)level)
- .WriteTo.Console()
+ .WriteTo.Console(outputTemplate: "{Timestamp:yyyy-MM-dd HH:mm:ss.fff} [{Level}] {Message}{NewLine}{Exception}")
.CreateLogger();
return instance;
}
diff --git a/examples/analyze/intent/Analyze.csproj b/examples/analyze/intent/Analyze.csproj
new file mode 100644
index 00000000..d0ea3e2e
--- /dev/null
+++ b/examples/analyze/intent/Analyze.csproj
@@ -0,0 +1,28 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>net6.0</TargetFramework>
+    <ImplicitUsings>enable</ImplicitUsings>
+    <Nullable>enable</Nullable>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <ProjectReference Include="..\..\..\Deepgram\Deepgram.csproj" />
+  </ItemGroup>
+
+  <ItemGroup>
+    <None Update="conversation.txt">
+      <CopyToOutputDirectory>Always</CopyToOutputDirectory>
+    </None>
+  </ItemGroup>
+
+</Project>
diff --git a/examples/analyze/intent/Program.cs b/examples/analyze/intent/Program.cs
new file mode 100644
index 00000000..b778ce4c
--- /dev/null
+++ b/examples/analyze/intent/Program.cs
@@ -0,0 +1,45 @@
+// Copyright 2024 Deepgram .NET SDK contributors. All Rights Reserved.
+// Use of this source code is governed by a MIT license that can be found in the LICENSE file.
+// SPDX-License-Identifier: MIT
+
+using System.Text.Json;
+
+using Deepgram;
+using Deepgram.Models.Analyze.v1;
+
+namespace PreRecorded
+{
+ class Program
+ {
+ static async Task Main(string[] args)
+ {
+ // Initialize Library with default logging
+ // Normal logging is "Info" level
+ Library.Initialize();
+
+ // Set "DEEPGRAM_API_KEY" environment variable to your Deepgram API Key
+ var deepgramClient = new AnalyzeClient();
+
+ // check to see if the file exists
+ if (!File.Exists(@"conversation.txt"))
+ {
+ Console.WriteLine("Error: File 'conversation.txt' not found.");
+ return;
+ }
+
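+ // Read the conversation text file; AnalyzeFile submits its raw bytes for analysis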
+ var textData = File.ReadAllBytes(@"conversation.txt");
+ var response = await deepgramClient.AnalyzeFile(
+ textData,
+ new AnalyzeSchema()
+ {
+ Language = "en",
+ Intents = true,
+ });
+
+ Console.WriteLine(JsonSerializer.Serialize(response));
+ Console.ReadKey();
+
+ // Teardown Library
+ Library.Terminate();
+ }
+ }
+}
\ No newline at end of file
diff --git a/examples/analyze/intent/conversation.txt b/examples/analyze/intent/conversation.txt
new file mode 100644
index 00000000..2af89e08
--- /dev/null
+++ b/examples/analyze/intent/conversation.txt
@@ -0,0 +1,71 @@
+Meet Deepgram Aura: real-time text-to-speech for real-time AI agents
+----------
+It’s been a year since large language models (LLMs) seemingly went mainstream overnight (Happy Birthday, ChatGPT!!!), and the world has witnessed both rapid development of these technologies and immense interest in their potential. We believe that we have reached an inflection point where voice-based interfaces will be the primary means of accessing LLMs and the experiences they unlock. Here are a few recent signals in support of our thesis:
+
+- Good old fashioned voice notes are enjoying a healthy resurgence.
+
+- According to a recent survey, a majority of respondents stated phone calls are still their preferred communication channel for resolving customer service issues.
+
+- An emerging boom in wearable devices equipped with continuous listening and speech AI technology is gaining steam.
+
+- OpenAI recently enabled voice interactions in ChatGPT.
+
+- A wave of interest in voice-first experiences and tools is sweeping across brands, investors, and tech companies.
+
+Thanks to ChatGPT and the advent of the LLM era, the conversational AI tech stack has advanced sufficiently to support productive (not frustrating) voice-powered AI assistants and agents that can interact with humans in a natural manner. We have already observed this from our most innovative customers who are actively turning to these technologies to build a diverse range of AI agents for voice ordering systems, interview bots, personal AI assistants, automated drive-thru tellers, and autonomous sales and customer service agents.
+
+While these AI agents hold immense potential, many customers have expressed their dissatisfaction with the current crop of voice AI vendors, citing roadblocks related to speed, cost, reliability, and conversational quality. That’s why we’re excited to introduce our own text-to-speech (TTS) API, Deepgram Aura, built for real-time, conversational voice AI agents.
+
+Whether used on its own or in conjunction with our industry-leading Nova-2 speech-to-text API, we’ll soon provide developers with a complete speech AI platform, giving them the essential building blocks they need to build high throughput, real-time AI agents of the future.
+
+We are thrilled about the progress our initial group of developers has made using Aura, so much so that we are extending limited access to a select few partners who will be free to begin integrating with Aura immediately. With their feedback, we’ll continue to enhance our suite of voices and API features, as well as ensure a smooth launch of their production-grade applications.
+
+
+What Customers Want
+----------
+I feel the need, the need for speed
+What we’ve heard from many of our customers and partners is that voice AI technology today caters to two main areas: high production or high throughput.
+
+High Production is all about crafting the perfect voice. It's used in projects where every tone and inflection matters, like in video games or audiobooks, to really bring a scene or story to life. Here, voice quality is king, with creators investing hours to fine-tune every detail for a powerful emotional impact. The primary benefit is the ability to swap out a high-paid voice actor with AI where you have more dynamic control over what’s being said while also achieving some cost savings. But these use cases are more specialized and represent just a sliver of the overall voice AI opportunity.
+
+On the flip side, High Throughput is about handling many quick, one-off interactions for real-time conversations at scale. Think fast food ordering, booking appointments, or inquiring about the latest deals at a car dealership. These tasks are relevant to just about everyone on the planet, and they require fast, efficient text-to-speech conversion for an AI agent to fulfill them. While voice quality is still important to keep users engaged, quality here is more about the naturalness of the flow of conversation and less about sounding like Morgan Freeman. But the primary focus for most customers in this category is on improving customer outcomes, meaning speed and efficiency are must-haves for ensuring these everyday exchanges are smooth and reliable at high volume.
+
+"Deepgram showed me less than 200ms latency today. That's the fastest text-to-speech I’ve ever seen. And our customers would be more than satisfied with the conversation quality."
+
+Jordan Dearsley, Co-founder at Vapi
+
+Although high production use cases seem to be well-served with UI-centric production tools, high throughput, real-time use cases still mostly rely on APIs provided by the major cloud providers. And our customers have been telling us that they’ve been falling short, with insufficient quality for a good user experience, too much latency to make real-time use cases work, and costs too expensive to operate at scale.
+
+
+More human than human
+----------
+With Aura, we’ll give realistic voices to AI agents. Our goal is to craft text-to-speech capabilities that mirror natural human conversations, including timely responses, the incorporation of natural speech fillers like 'um' and 'uh' during contemplation, and the modulation of tone and emotion according to the conversational context. We aim to incorporate laughter and other speech nuances as well. Furthermore, we are dedicated to tailoring these voices to their specific applications, ensuring they remain composed and articulate, particularly in enunciating account numbers and business names with precision.
+
+"I don’t really consider Azure and the other guys anymore because the voices sound so robotic."
+Jordan Dearsley, Co-founder at Vapi
+
+In blind evaluation trials conducted for benchmarking, early versions of Aura have consistently been rated as sounding more human than prominent alternatives, even outranking human speakers for various audio clips more often than not on average. We were pleasantly surprised by these results (stay tuned for a future post containing comprehensive benchmarks for speed and quality soon!), so much so that we’re accelerating our development timeline and publicly announcing today’s waitlist expansion.
+
+Here are some sample clips generated by one of the earliest iterations of Aura. The quality and overall performance will continue to improve with additional model training and refinement. We encourage you to give them a listen and note the naturalness of their cadence, rhythm, and tone in the flow of conversation with another human.
+
+
+Our Approach
+----------
+For nearly a decade, we’ve worked tirelessly to advance the art of the possible in speech recognition and spoken language understanding. Along the way, we’ve transcribed trillions of spoken words into highly accurate transcriptions. Our model research team has developed novel transformer architectures equipped to deal with the nuances of conversational audio–across different languages, accents, and dialects, while handling disfluencies and the changing rhythms, tones, cadences, and inflections that occur in natural, back-and-forth conversations.
+
+And all the while, we’ve purposefully built our models under limited constraints to optimize their speed and efficiency. With support for dozens of languages and custom model training, our technical team has trained and deployed thousands of speech AI models (more than anybody else) which we operate and manage for our customers each day using our own computing infrastructure.
+
+We also have our own in-house data labeling and data ops team with years of experience building bespoke workflows to record, store, and transfer vast amounts of audio in order to label it and continuously grow our bank of high-quality data (millions of hours and counting) used in our model training.
+
+These combined experiences have made us experts in processing and modeling speech audio, especially in support of streaming use cases with our real-time STT models. Our customers have been asking if we could apply the same approach for TTS, and we can.
+
+So what can you expect from Aura? Delivering the same market-leading value and performance as Nova-2 does for STT. Aura is built to be the panacea for speed, quality, and efficiency–the fastest of the high-quality options, and the best quality of the fast ones. And that’s really what end users need and what our customers have been asking us to build.
+
+"Deepgram is a valued partner, providing our customers with high throughput speech-to-text that delivers unrivaled performance without tradeoffs between quality, speed, and cost. We're excited to see Deepgram extend their speech AI platform and bring this approach to the text-to-speech market." - Richard Dumas, VP AI Product Strategy at Five9
+
+
+What's Next
+----------
+As we’ve discussed, scaled voice agents are a high throughput use case, and we believe their success will ultimately depend on a unified approach to audio, one that strikes the right balance between natural voice quality, responsiveness, and cost-efficiency. And with Aura, we’re just getting started. We’re looking forward to continuing to work with customers like Asurion and partners like Five9 across speech-to-text AND text-to-speech as we help them define the future of AI agents, and we invite you to join us on this journey.
+
+We expect to release generally early next year, but if you’re working on any real-time AI agent use cases, join our waitlist today to jumpstart your development in production as we continue to refine our model and API features with your direct feedback.
\ No newline at end of file
diff --git a/examples/analyze/sentiment/Analyze.csproj b/examples/analyze/sentiment/Analyze.csproj
new file mode 100644
index 00000000..d0ea3e2e
--- /dev/null
+++ b/examples/analyze/sentiment/Analyze.csproj
@@ -0,0 +1,28 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>net6.0</TargetFramework>
+    <ImplicitUsings>enable</ImplicitUsings>
+    <Nullable>enable</Nullable>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <ProjectReference Include="..\..\..\Deepgram\Deepgram.csproj" />
+  </ItemGroup>
+
+  <ItemGroup>
+    <None Update="conversation.txt">
+      <CopyToOutputDirectory>Always</CopyToOutputDirectory>
+    </None>
+  </ItemGroup>
+
+</Project>
diff --git a/examples/analyze/sentiment/Program.cs b/examples/analyze/sentiment/Program.cs
new file mode 100644
index 00000000..7312ff85
--- /dev/null
+++ b/examples/analyze/sentiment/Program.cs
@@ -0,0 +1,45 @@
+// Copyright 2024 Deepgram .NET SDK contributors. All Rights Reserved.
+// Use of this source code is governed by a MIT license that can be found in the LICENSE file.
+// SPDX-License-Identifier: MIT
+
+using System.Text.Json;
+
+using Deepgram;
+using Deepgram.Models.Analyze.v1;
+
+namespace PreRecorded
+{
+ class Program
+ {
+ static async Task Main(string[] args)
+ {
+ // Initialize Library with default logging
+ // Normal logging is "Info" level
+ Library.Initialize();
+
+ // Set "DEEPGRAM_API_KEY" environment variable to your Deepgram API Key
+ var deepgramClient = new AnalyzeClient();
+
+ // check to see if the file exists
+ if (!File.Exists(@"conversation.txt"))
+ {
+ Console.WriteLine("Error: File 'conversation.txt' not found.");
+ return;
+ }
+
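+ // Read the conversation text file; AnalyzeFile submits its raw bytes for analysis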
+ var textData = File.ReadAllBytes(@"conversation.txt");
+ var response = await deepgramClient.AnalyzeFile(
+ textData,
+ new AnalyzeSchema()
+ {
+ Language = "en",
+ Sentiment = true,
+ });
+
+ Console.WriteLine(JsonSerializer.Serialize(response));
+ Console.ReadKey();
+
+ // Teardown Library
+ Library.Terminate();
+ }
+ }
+}
\ No newline at end of file
diff --git a/examples/analyze/sentiment/conversation.txt b/examples/analyze/sentiment/conversation.txt
new file mode 100644
index 00000000..2af89e08
--- /dev/null
+++ b/examples/analyze/sentiment/conversation.txt
@@ -0,0 +1,71 @@
+Meet Deepgram Aura: real-time text-to-speech for real-time AI agents
+----------
+It’s been a year since large language models (LLMs) seemingly went mainstream overnight (Happy Birthday, ChatGPT!!!), and the world has witnessed both rapid development of these technologies and immense interest in their potential. We believe that we have reached an inflection point where voice-based interfaces will be the primary means of accessing LLMs and the experiences they unlock. Here are a few recent signals in support of our thesis:
+
+- Good old fashioned voice notes are enjoying a healthy resurgence.
+
+- According to a recent survey, a majority of respondents stated phone calls are still their preferred communication channel for resolving customer service issues.
+
+- An emerging boom in wearable devices equipped with continuous listening and speech AI technology is gaining steam.
+
+- OpenAI recently enabled voice interactions in ChatGPT.
+
+- A wave of interest in voice-first experiences and tools is sweeping across brands, investors, and tech companies.
+
+Thanks to ChatGPT and the advent of the LLM era, the conversational AI tech stack has advanced sufficiently to support productive (not frustrating) voice-powered AI assistants and agents that can interact with humans in a natural manner. We have already observed this from our most innovative customers who are actively turning to these technologies to build a diverse range of AI agents for voice ordering systems, interview bots, personal AI assistants, automated drive-thru tellers, and autonomous sales and customer service agents.
+
+While these AI agents hold immense potential, many customers have expressed their dissatisfaction with the current crop of voice AI vendors, citing roadblocks related to speed, cost, reliability, and conversational quality. That’s why we’re excited to introduce our own text-to-speech (TTS) API, Deepgram Aura, built for real-time, conversational voice AI agents.
+
+Whether used on its own or in conjunction with our industry-leading Nova-2 speech-to-text API, we’ll soon provide developers with a complete speech AI platform, giving them the essential building blocks they need to build high throughput, real-time AI agents of the future.
+
+We are thrilled about the progress our initial group of developers has made using Aura, so much so that we are extending limited access to a select few partners who will be free to begin integrating with Aura immediately. With their feedback, we’ll continue to enhance our suite of voices and API features, as well as ensure a smooth launch of their production-grade applications.
+
+
+What Customers Want
+----------
+I feel the need, the need for speed
+What we’ve heard from many of our customers and partners is that voice AI technology today caters to two main areas: high production or high throughput.
+
+High Production is all about crafting the perfect voice. It's used in projects where every tone and inflection matters, like in video games or audiobooks, to really bring a scene or story to life. Here, voice quality is king, with creators investing hours to fine-tune every detail for a powerful emotional impact. The primary benefit is the ability to swap out a high-paid voice actor with AI where you have more dynamic control over what’s being said while also achieving some cost savings. But these use cases are more specialized and represent just a sliver of the overall voice AI opportunity.
+
+On the flip side, High Throughput is about handling many quick, one-off interactions for real-time conversations at scale. Think fast food ordering, booking appointments, or inquiring about the latest deals at a car dealership. These tasks are relevant to just about everyone on the planet, and they require fast, efficient text-to-speech conversion for an AI agent to fulfill them. While voice quality is still important to keep users engaged, quality here is more about the naturalness of the flow of conversation and less about sounding like Morgan Freeman. But the primary focus for most customers in this category is on improving customer outcomes, meaning speed and efficiency are must-haves for ensuring these everyday exchanges are smooth and reliable at high volume.
+
+"Deepgram showed me less than 200ms latency today. That's the fastest text-to-speech I’ve ever seen. And our customers would be more than satisfied with the conversation quality."
+
+Jordan Dearsley, Co-founder at Vapi
+
+Although high production use cases seem to be well-served with UI-centric production tools, high throughput, real-time use cases still mostly rely on APIs provided by the major cloud providers. And our customers have been telling us that they’ve been falling short, with insufficient quality for a good user experience, too much latency to make real-time use cases work, and costs too expensive to operate at scale.
+
+
+More human than human
+----------
+With Aura, we’ll give realistic voices to AI agents. Our goal is to craft text-to-speech capabilities that mirror natural human conversations, including timely responses, the incorporation of natural speech fillers like 'um' and 'uh' during contemplation, and the modulation of tone and emotion according to the conversational context. We aim to incorporate laughter and other speech nuances as well. Furthermore, we are dedicated to tailoring these voices to their specific applications, ensuring they remain composed and articulate, particularly in enunciating account numbers and business names with precision.
+
+"I don’t really consider Azure and the other guys anymore because the voices sound so robotic."
+Jordan Dearsley, Co-founder at Vapi
+
+In blind evaluation trials conducted for benchmarking, early versions of Aura have consistently been rated as sounding more human than prominent alternatives, even outranking human speakers for various audio clips more often than not on average. We were pleasantly surprised by these results (stay tuned for a future post containing comprehensive benchmarks for speed and quality soon!), so much so that we’re accelerating our development timeline and publicly announcing today’s waitlist expansion.
+
+Here are some sample clips generated by one of the earliest iterations of Aura. The quality and overall performance will continue to improve with additional model training and refinement. We encourage you to give them a listen and note the naturalness of their cadence, rhythm, and tone in the flow of conversation with another human.
+
+
+Our Approach
+----------
+For nearly a decade, we’ve worked tirelessly to advance the art of the possible in speech recognition and spoken language understanding. Along the way, we’ve transcribed trillions of spoken words into highly accurate transcriptions. Our model research team has developed novel transformer architectures equipped to deal with the nuances of conversational audio–across different languages, accents, and dialects, while handling disfluencies and the changing rhythms, tones, cadences, and inflections that occur in natural, back-and-forth conversations.
+
+And all the while, we’ve purposefully built our models under limited constraints to optimize their speed and efficiency. With support for dozens of languages and custom model training, our technical team has trained and deployed thousands of speech AI models (more than anybody else) which we operate and manage for our customers each day using our own computing infrastructure.
+
+We also have our own in-house data labeling and data ops team with years of experience building bespoke workflows to record, store, and transfer vast amounts of audio in order to label it and continuously grow our bank of high-quality data (millions of hours and counting) used in our model training.
+
+These combined experiences have made us experts in processing and modeling speech audio, especially in support of streaming use cases with our real-time STT models. Our customers have been asking if we could apply the same approach for TTS, and we can.
+
+So what can you expect from Aura? Delivering the same market-leading value and performance as Nova-2 does for STT. Aura is built to be the panacea for speed, quality, and efficiency–the fastest of the high-quality options, and the best quality of the fast ones. And that’s really what end users need and what our customers have been asking us to build.
+
+"Deepgram is a valued partner, providing our customers with high throughput speech-to-text that delivers unrivaled performance without tradeoffs between quality, speed, and cost. We're excited to see Deepgram extend their speech AI platform and bring this approach to the text-to-speech market." - Richard Dumas, VP AI Product Strategy at Five9
+
+
+What's Next
+----------
+As we’ve discussed, scaled voice agents are a high throughput use case, and we believe their success will ultimately depend on a unified approach to audio, one that strikes the right balance between natural voice quality, responsiveness, and cost-efficiency. And with Aura, we’re just getting started. We’re looking forward to continuing to work with customers like Asurion and partners like Five9 across speech-to-text AND text-to-speech as we help them define the future of AI agents, and we invite you to join us on this journey.
+
+We expect to release generally early next year, but if you’re working on any real-time AI agent use cases, join our waitlist today to jumpstart your development in production as we continue to refine our model and API features with your direct feedback.
\ No newline at end of file
diff --git a/examples/analyze/summary/Analyze.csproj b/examples/analyze/summary/Analyze.csproj
new file mode 100644
index 00000000..d0ea3e2e
--- /dev/null
+++ b/examples/analyze/summary/Analyze.csproj
@@ -0,0 +1,28 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>net6.0</TargetFramework>
+    <ImplicitUsings>enable</ImplicitUsings>
+    <Nullable>enable</Nullable>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <ProjectReference Include="..\..\..\Deepgram\Deepgram.csproj" />
+  </ItemGroup>
+
+  <ItemGroup>
+    <None Update="conversation.txt">
+      <CopyToOutputDirectory>Always</CopyToOutputDirectory>
+    </None>
+  </ItemGroup>
+
+</Project>
diff --git a/examples/analyze/summary/Program.cs b/examples/analyze/summary/Program.cs
new file mode 100644
index 00000000..d848fdec
--- /dev/null
+++ b/examples/analyze/summary/Program.cs
@@ -0,0 +1,45 @@
+// Copyright 2024 Deepgram .NET SDK contributors. All Rights Reserved.
+// Use of this source code is governed by a MIT license that can be found in the LICENSE file.
+// SPDX-License-Identifier: MIT
+
+using System.Text.Json;
+
+using Deepgram;
+using Deepgram.Models.Analyze.v1;
+
+namespace PreRecorded
+{
+ class Program
+ {
+ static async Task Main(string[] args)
+ {
+ // Initialize Library with default logging
+ // Normal logging is "Info" level
+ Library.Initialize();
+
+ // Set "DEEPGRAM_API_KEY" environment variable to your Deepgram API Key
+ var deepgramClient = new AnalyzeClient();
+
+ // check to see if the file exists
+ if (!File.Exists(@"conversation.txt"))
+ {
+ Console.WriteLine("Error: File 'conversation.txt' not found.");
+ return;
+ }
+
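+ // Read the conversation text file; AnalyzeFile submits its raw bytes for analysis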
+ var textData = File.ReadAllBytes(@"conversation.txt");
+ var response = await deepgramClient.AnalyzeFile(
+ textData,
+ new AnalyzeSchema()
+ {
+ Language = "en",
+ Summarize = true,
+ });
+
+ Console.WriteLine(JsonSerializer.Serialize(response));
+ Console.ReadKey();
+
+ // Teardown Library
+ Library.Terminate();
+ }
+ }
+}
\ No newline at end of file
diff --git a/examples/analyze/summary/conversation.txt b/examples/analyze/summary/conversation.txt
new file mode 100644
index 00000000..2af89e08
--- /dev/null
+++ b/examples/analyze/summary/conversation.txt
@@ -0,0 +1,71 @@
+Meet Deepgram Aura: real-time text-to-speech for real-time AI agents
+----------
+It’s been a year since large language models (LLMs) seemingly went mainstream overnight (Happy Birthday, ChatGPT!!!), and the world has witnessed both rapid development of these technologies and immense interest in their potential. We believe that we have reached an inflection point where voice-based interfaces will be the primary means of accessing LLMs and the experiences they unlock. Here are a few recent signals in support of our thesis:
+
+- Good old fashioned voice notes are enjoying a healthy resurgence.
+
+- According to a recent survey, a majority of respondents stated phone calls are still their preferred communication channel for resolving customer service issues.
+
+- An emerging boom in wearable devices equipped with continuous listening and speech AI technology is gaining steam.
+
+- OpenAI recently enabled voice interactions in ChatGPT.
+
+- A wave of interest in voice-first experiences and tools is sweeping across brands, investors, and tech companies.
+
+Thanks to ChatGPT and the advent of the LLM era, the conversational AI tech stack has advanced sufficiently to support productive (not frustrating) voice-powered AI assistants and agents that can interact with humans in a natural manner. We have already observed this from our most innovative customers who are actively turning to these technologies to build a diverse range of AI agents for voice ordering systems, interview bots, personal AI assistants, automated drive-thru tellers, and autonomous sales and customer service agents.
+
+While these AI agents hold immense potential, many customers have expressed their dissatisfaction with the current crop of voice AI vendors, citing roadblocks related to speed, cost, reliability, and conversational quality. That’s why we’re excited to introduce our own text-to-speech (TTS) API, Deepgram Aura, built for real-time, conversational voice AI agents.
+
+Whether used on its own or in conjunction with our industry-leading Nova-2 speech-to-text API, we’ll soon provide developers with a complete speech AI platform, giving them the essential building blocks they need to build high throughput, real-time AI agents of the future.
+
+We are thrilled about the progress our initial group of developers has made using Aura, so much so that we are extending limited access to a select few partners who will be free to begin integrating with Aura immediately. With their feedback, we’ll continue to enhance our suite of voices and API features, as well as ensure a smooth launch of their production-grade applications.
+
+
+What Customers Want
+----------
+I feel the need, the need for speed
+What we’ve heard from many of our customers and partners is that voice AI technology today caters to two main areas: high production or high throughput.
+
+High Production is all about crafting the perfect voice. It's used in projects where every tone and inflection matters, like in video games or audiobooks, to really bring a scene or story to life. Here, voice quality is king, with creators investing hours to fine-tune every detail for a powerful emotional impact. The primary benefit is the ability to swap out a high-paid voice actor with AI where you have more dynamic control over what’s being said while also achieving some cost savings. But these use cases are more specialized and represent just a sliver of the overall voice AI opportunity.
+
+On the flip side, High Throughput is about handling many quick, one-off interactions for real-time conversations at scale. Think fast food ordering, booking appointments, or inquiring about the latest deals at a car dealership. These tasks are relevant to just about everyone on the planet, and they require fast, efficient text-to-speech conversion for an AI agent to fulfill them. While voice quality is still important to keep users engaged, quality here is more about the naturalness of the flow of conversation and less about sounding like Morgan Freeman. But the primary focus for most customers in this category is on improving customer outcomes, meaning speed and efficiency are must-haves for ensuring these everyday exchanges are smooth and reliable at high volume.
+
+"Deepgram showed me less than 200ms latency today. That's the fastest text-to-speech I’ve ever seen. And our customers would be more than satisfied with the conversation quality."
+
+Jordan Dearsley, Co-founder at Vapi
+
+Although high production use cases seem to be well-served with UI-centric production tools, high throughput, real-time use cases still mostly rely on APIs provided by the major cloud providers. And our customers have been telling us that they’ve been falling short, with insufficient quality for a good user experience, too much latency to make real-time use cases work, and costs too expensive to operate at scale.
+
+
+More human than human
+----------
+With Aura, we’ll give realistic voices to AI agents. Our goal is to craft text-to-speech capabilities that mirror natural human conversations, including timely responses, the incorporation of natural speech fillers like 'um' and 'uh' during contemplation, and the modulation of tone and emotion according to the conversational context. We aim to incorporate laughter and other speech nuances as well. Furthermore, we are dedicated to tailoring these voices to their specific applications, ensuring they remain composed and articulate, particularly in enunciating account numbers and business names with precision.
+
+"I don’t really consider Azure and the other guys anymore because the voices sound so robotic."
+Jordan Dearsley, Co-founder at Vapi
+
+In blind evaluation trials conducted for benchmarking, early versions of Aura have consistently been rated as sounding more human than prominent alternatives, even outranking human speakers for various audio clips more often than not on average. We were pleasantly surprised by these results (stay tuned for a future post containing comprehensive benchmarks for speed and quality soon!), so much so that we’re accelerating our development timeline and publicly announcing today’s waitlist expansion.
+
+Here are some sample clips generated by one of the earliest iterations of Aura. The quality and overall performance will continue to improve with additional model training and refinement. We encourage you to give them a listen and note the naturalness of their cadence, rhythm, and tone in the flow of conversation with another human.
+
+
+Our Approach
+----------
+For nearly a decade, we’ve worked tirelessly to advance the art of the possible in speech recognition and spoken language understanding. Along the way, we’ve transcribed trillions of spoken words into highly accurate transcriptions. Our model research team has developed novel transformer architectures equipped to deal with the nuances of conversational audio–across different languages, accents, and dialects, while handling disfluencies and the changing rhythms, tones, cadences, and inflections that occur in natural, back-and-forth conversations.
+
+And all the while, we’ve purposefully built our models under limited constraints to optimize their speed and efficiency. With support for dozens of languages and custom model training, our technical team has trained and deployed thousands of speech AI models (more than anybody else) which we operate and manage for our customers each day using our own computing infrastructure.
+
+We also have our own in-house data labeling and data ops team with years of experience building bespoke workflows to record, store, and transfer vast amounts of audio in order to label it and continuously grow our bank of high-quality data (millions of hours and counting) used in our model training.
+
+These combined experiences have made us experts in processing and modeling speech audio, especially in support of streaming use cases with our real-time STT models. Our customers have been asking if we could apply the same approach for TTS, and we can.
+
+So what can you expect from Aura? Delivering the same market-leading value and performance as Nova-2 does for STT. Aura is built to be the panacea for speed, quality, and efficiency–the fastest of the high-quality options, and the best quality of the fast ones. And that’s really what end users need and what our customers have been asking us to build.
+
+"Deepgram is a valued partner, providing our customers with high throughput speech-to-text that delivers unrivaled performance without tradeoffs between quality, speed, and cost. We're excited to see Deepgram extend their speech AI platform and bring this approach to the text-to-speech market." - Richard Dumas, VP AI Product Strategy at Five9
+
+
+What's Next
+----------
+As we’ve discussed, scaled voice agents are a high throughput use case, and we believe their success will ultimately depend on a unified approach to audio, one that strikes the right balance between natural voice quality, responsiveness, and cost-efficiency. And with Aura, we’re just getting started. We’re looking forward to continuing to work with customers like Asurion and partners like Five9 across speech-to-text AND text-to-speech as we help them define the future of AI agents, and we invite you to join us on this journey.
+
+We expect general availability early next year, but if you’re working on any real-time AI agent use cases, join our waitlist today to jumpstart your path to production as we continue to refine our model and API features with your direct feedback.
\ No newline at end of file
diff --git a/examples/analyze/topic/Analyze.csproj b/examples/analyze/topic/Analyze.csproj
new file mode 100644
index 00000000..d0ea3e2e
--- /dev/null
+++ b/examples/analyze/topic/Analyze.csproj
@@ -0,0 +1,28 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>net6.0</TargetFramework>
+    <ImplicitUsings>enable</ImplicitUsings>
+    <Nullable>enable</Nullable>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <ProjectReference Include="..\..\..\Deepgram\Deepgram.csproj" />
+  </ItemGroup>
+
+  <ItemGroup>
+    <None Update="conversation.txt">
+      <CopyToOutputDirectory>Always</CopyToOutputDirectory>
+    </None>
+  </ItemGroup>
+
+</Project>
diff --git a/examples/analyze/topic/Program.cs b/examples/analyze/topic/Program.cs
new file mode 100644
index 00000000..47a45070
--- /dev/null
+++ b/examples/analyze/topic/Program.cs
@@ -0,0 +1,45 @@
+// Copyright 2024 Deepgram .NET SDK contributors. All Rights Reserved.
+// Use of this source code is governed by a MIT license that can be found in the LICENSE file.
+// SPDX-License-Identifier: MIT
+
+using System.Text.Json;
+
+using Deepgram.Models.Analyze.v1;
+
+namespace PreRecorded
+{
+ class Program
+ {
+ static async Task Main(string[] args)
+ {
+ // Initialize Library with default logging
+ // Normal logging is "Info" level
+ Library.Initialize();
+
+ // Set "DEEPGRAM_API_KEY" environment variable to your Deepgram API Key
+ var deepgramClient = new AnalyzeClient();
+
+ // check to see if the file exists
+ if (!File.Exists(@"conversation.txt"))
+ {
+ Console.WriteLine("Error: File 'conversation.txt' not found.");
+ return;
+ }
+
+ var textData = File.ReadAllBytes(@"conversation.txt");
+ var response = await deepgramClient.AnalyzeFile(
+ textData,
+ new AnalyzeSchema()
+ {
+ Language = "en",
+ Topics = true,
+ });
+
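+ // The serialized response includes the topics detected in the conversation text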
+ Console.WriteLine(JsonSerializer.Serialize(response));
+ Console.ReadKey();
+
+ // Teardown Library
+ Library.Terminate();
+ }
+ }
+}
\ No newline at end of file
diff --git a/examples/analyze/topic/conversation.txt b/examples/analyze/topic/conversation.txt
new file mode 100644
index 00000000..2af89e08
--- /dev/null
+++ b/examples/analyze/topic/conversation.txt
@@ -0,0 +1,71 @@
+Meet Deepgram Aura: real-time text-to-speech for real-time AI agents
+----------
+It’s been a year since large language models (LLMs) seemingly went mainstream overnight (Happy Birthday, ChatGPT!!!), and the world has witnessed both rapid development of these technologies and immense interest in their potential. We believe we have reached an inflection point where voice-based interfaces will be the primary means of accessing LLMs and the experiences they unlock. Here are a few recent signals in support of our thesis:
+
+- Good old-fashioned voice notes are enjoying a healthy resurgence.
+
+- According to a recent survey, a majority of respondents stated phone calls are still their preferred communication channel for resolving customer service issues.
+
+- A boom in wearable devices equipped with continuous listening and speech AI technology is gaining steam.
+
+- OpenAI recently enabled voice interactions in ChatGPT.
+
+- A wave of interest in voice-first experiences and tools is sweeping across brands, investors, and tech companies.
+
+Thanks to ChatGPT and the advent of the LLM era, the conversational AI tech stack has advanced sufficiently to support productive (not frustrating) voice-powered AI assistants and agents that can interact with humans in a natural manner. We have already observed this from our most innovative customers who are actively turning to these technologies to build a diverse range of AI agents for voice ordering systems, interview bots, personal AI assistants, automated drive-thru tellers, and autonomous sales and customer service agents.
+
+While these AI agents hold immense potential, many customers have expressed their dissatisfaction with the current crop of voice AI vendors, citing roadblocks related to speed, cost, reliability, and conversational quality. That’s why we’re excited to introduce our own text-to-speech (TTS) API, Deepgram Aura, built for real-time, conversational voice AI agents.
+
+Whether used on its own or in conjunction with our industry-leading Nova-2 speech-to-text API, we’ll soon provide developers with a complete speech AI platform, giving them the essential building blocks they need to build high throughput, real-time AI agents of the future.
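+
+To make the speech-to-text half concrete, here is a minimal sketch using the prerecorded client from our .NET SDK (it mirrors the SDK’s bundled URL-transcription example):
+
+    using Deepgram;
+    using Deepgram.Models.PreRecorded.v1;
+
+    Library.Initialize();
+
+    // Reads your API key from the DEEPGRAM_API_KEY environment variable
+    var deepgramClient = new PreRecordedClient();
+
+    // Transcribe a hosted audio file with the Nova-2 model
+    var response = await deepgramClient.TranscribeUrl(
+        new UrlSource("https://static.deepgram.com/examples/Bueller-Life-moves-pretty-fast.wav"),
+        new PrerecordedSchema() { Model = "nova-2" });
+
+    Library.Terminate();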
+
+We are thrilled about the progress our initial group of developers has made using Aura, so much so that we are extending limited access to a select few partners who will be free to begin integrating with Aura immediately. With their feedback, we’ll continue to enhance our suite of voices and API features, as well as ensure a smooth launch of their production-grade applications.
+
+
+What Customers Want
+----------
+I feel the need, the need for speed
+What we’ve heard from many of our customers and partners is that voice AI technology today caters to two main areas: high production and high throughput.
+
+High Production is all about crafting the perfect voice. It’s used in projects where every tone and inflection matters, like in video games or audiobooks, to really bring a scene or story to life. Here, voice quality is king, with creators investing hours to fine-tune every detail for a powerful emotional impact. The primary benefit is the ability to swap out a highly paid voice actor for AI, gaining more dynamic control over what’s being said while also achieving some cost savings. But these use cases are more specialized and represent just a sliver of the overall voice AI opportunity.
+
+On the flip side, High Throughput is about handling many quick, one-off interactions for real-time conversations at scale. Think fast food ordering, booking appointments, or inquiring about the latest deals at a car dealership. These tasks are relevant to just about everyone on the planet, and they require fast, efficient text-to-speech conversion for an AI agent to fulfill them. While voice quality is still important to keep users engaged, quality here is more about the naturalness of the flow of conversation and less about sounding like Morgan Freeman. But the primary focus for most customers in this category is on improving customer outcomes, meaning speed and efficiency are must-haves for ensuring these everyday exchanges are smooth and reliable at high volume.
+
+"Deepgram showed me less than 200ms latency today. That's the fastest text-to-speech I’ve ever seen. And our customers would be more than satisfied with the conversation quality."
+
+Jordan Dearsley, Co-founder at Vapi
+
+Although high production use cases seem to be well served by UI-centric production tools, high throughput, real-time use cases still mostly rely on APIs provided by the major cloud providers. And our customers have been telling us that these offerings fall short, with insufficient quality for a good user experience, too much latency to make real-time use cases work, and costs too high to operate at scale.
+
+
+More human than human
+----------
+With Aura, we’ll give realistic voices to AI agents. Our goal is to craft text-to-speech capabilities that mirror natural human conversation: timely responses, natural speech fillers like 'um' and 'uh' during contemplation, and tone and emotion modulated to the conversational context. We aim to incorporate laughter and other speech nuances as well. Furthermore, we are dedicated to tailoring these voices to their specific applications, ensuring they remain composed and articulate, particularly when enunciating account numbers and business names with precision.
+
+"I don’t really consider Azure and the other guys anymore because the voices sound so robotic."
+Jordan Dearsley, Co-founder at Vapi
+
+In blind evaluation trials conducted for benchmarking, early versions of Aura have consistently been rated as sounding more human than prominent alternatives, even outranking human speakers on various audio clips more often than not. We were pleasantly surprised by these results (stay tuned for a future post with comprehensive speed and quality benchmarks!), so much so that we’re accelerating our development timeline and publicly announcing today’s waitlist expansion.
+
+Here are some sample clips generated by one of the earliest iterations of Aura. The quality and overall performance will continue to improve with additional model training and refinement. We encourage you to give them a listen and note the naturalness of their cadence, rhythm, and tone in the flow of conversation with another human.
+
+
+Our Approach
+----------
+For nearly a decade, we’ve worked tirelessly to advance the art of the possible in speech recognition and spoken language understanding. Along the way, we’ve produced highly accurate transcriptions of trillions of spoken words. Our model research team has developed novel transformer architectures equipped to deal with the nuances of conversational audio: different languages, accents, and dialects, as well as disfluencies and the changing rhythms, tones, cadences, and inflections that occur in natural, back-and-forth conversations.
+
+And all the while, we’ve purposefully built our models under tight resource constraints to optimize their speed and efficiency. With support for dozens of languages and custom model training, our technical team has trained and deployed thousands of speech AI models (more than anybody else), which we operate and manage for our customers each day using our own computing infrastructure.
+
+We also have our own in-house data labeling and data ops team with years of experience building bespoke workflows to record, store, and transfer vast amounts of audio in order to label it and continuously grow our bank of high-quality data (millions of hours and counting) used in our model training.
+
+These combined experiences have made us experts in processing and modeling speech audio, especially in support of streaming use cases with our real-time STT models. Our customers have been asking if we could apply the same approach to TTS, and we can.
+
+So what can you expect from Aura? The same market-leading value and performance that Nova-2 delivers for STT. Aura is built to hit the sweet spot of speed, quality, and efficiency: the fastest of the high-quality options, and the best quality of the fast ones. And that’s really what end users need and what our customers have been asking us to build.
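+
+Curious what this looks like in code? Here is a minimal sketch using the Speak client from our .NET SDK (it mirrors the SDK’s bundled example; the exact API surface and model lineup may evolve before general availability):
+
+    using Deepgram;
+    using Deepgram.Models.Speak.v1;
+
+    Library.Initialize();
+
+    // Reads your API key from the DEEPGRAM_API_KEY environment variable
+    var speakClient = new SpeakClient();
+
+    // Synthesize a short agent reply and write it to an MP3 file
+    var response = await speakClient.ToFile(
+        new TextSource("Thanks for calling! How can I help you today?"),
+        "greeting.mp3",
+        new SpeakSchema() { Model = "aura-asteria-en" });
+
+    Library.Terminate();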
+
+"Deepgram is a valued partner, providing our customers with high throughput speech-to-text that delivers unrivaled performance without tradeoffs between quality, speed, and cost. We're excited to see Deepgram extend their speech AI platform and bring this approach to the text-to-speech market." - Richard Dumas, VP AI Product Strategy at Five9
+
+
+What's Next
+----------
+As we’ve discussed, scaled voice agents are a high throughput use case, and we believe their success will ultimately depend on a unified approach to audio, one that strikes the right balance between natural voice quality, responsiveness, and cost-efficiency. And with Aura, we’re just getting started. We’re looking forward to continuing to work with customers like Asurion and partners like Five9 across speech-to-text AND text-to-speech as we help them define the future of AI agents, and we invite you to join us on this journey.
+
+We expect general availability early next year, but if you’re working on any real-time AI agent use cases, join our waitlist today to jumpstart your path to production as we continue to refine our model and API features with your direct feedback.
\ No newline at end of file
diff --git a/examples/prerecorded/file/Bueller-Life-moves-pretty-fast.wav b/examples/prerecorded/file/Bueller-Life-moves-pretty-fast.wav
new file mode 100644
index 00000000..ca08d921
Binary files /dev/null and b/examples/prerecorded/file/Bueller-Life-moves-pretty-fast.wav differ
diff --git a/examples/prerecorded/file/PreRecorded.csproj b/examples/prerecorded/file/PreRecorded.csproj
new file mode 100644
index 00000000..4caa2d7f
--- /dev/null
+++ b/examples/prerecorded/file/PreRecorded.csproj
@@ -0,0 +1,28 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>net6.0</TargetFramework>
+    <ImplicitUsings>enable</ImplicitUsings>
+    <Nullable>enable</Nullable>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <ProjectReference Include="..\..\..\Deepgram\Deepgram.csproj" />
+  </ItemGroup>
+
+  <ItemGroup>
+    <None Update="Bueller-Life-moves-pretty-fast.wav">
+      <CopyToOutputDirectory>Always</CopyToOutputDirectory>
+    </None>
+  </ItemGroup>
+
+</Project>
diff --git a/examples/prerecorded/file/Program.cs b/examples/prerecorded/file/Program.cs
index e2950393..10993235 100644
--- a/examples/prerecorded/file/Program.cs
+++ b/examples/prerecorded/file/Program.cs
@@ -22,14 +22,22 @@ static async Task Main(string[] args)
// Set "DEEPGRAM_API_KEY" environment variable to your Deepgram API Key
var deepgramClient = new PreRecordedClient();
- var response = await deepgramClient.TranscribeUrl(
- new UrlSource("https://static.deepgram.com/examples/Bueller-Life-moves-pretty-fast.wav"),
+ // check to see if the file exists
+ if (!File.Exists(@"Bueller-Life-moves-pretty-fast.wav"))
+ {
+ Console.WriteLine("Error: File 'Bueller-Life-moves-pretty-fast.wav' not found.");
+ return;
+ }
+
+ var audioData = File.ReadAllBytes(@"Bueller-Life-moves-pretty-fast.wav");
+ var response = await deepgramClient.TranscribeFile(
+ audioData,
new PrerecordedSchema()
{
Model = "nova-2",
+ Punctuate = true,
});
- //Console.WriteLine(response);
Console.WriteLine(JsonSerializer.Serialize(response));
Console.ReadKey();
diff --git a/examples/prerecorded/intent/CallCenterPhoneCall.mp3 b/examples/prerecorded/intent/CallCenterPhoneCall.mp3
new file mode 100644
index 00000000..75de2abc
Binary files /dev/null and b/examples/prerecorded/intent/CallCenterPhoneCall.mp3 differ
diff --git a/examples/prerecorded/intent/PreRecorded.csproj b/examples/prerecorded/intent/PreRecorded.csproj
new file mode 100644
index 00000000..151279f7
--- /dev/null
+++ b/examples/prerecorded/intent/PreRecorded.csproj
@@ -0,0 +1,28 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>net6.0</TargetFramework>
+    <ImplicitUsings>enable</ImplicitUsings>
+    <Nullable>enable</Nullable>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <ProjectReference Include="..\..\..\Deepgram\Deepgram.csproj" />
+  </ItemGroup>
+
+  <ItemGroup>
+    <None Update="CallCenterPhoneCall.mp3">
+      <CopyToOutputDirectory>Always</CopyToOutputDirectory>
+    </None>
+  </ItemGroup>
+
+</Project>
diff --git a/examples/prerecorded/intent/Program.cs b/examples/prerecorded/intent/Program.cs
new file mode 100644
index 00000000..f8a43348
--- /dev/null
+++ b/examples/prerecorded/intent/Program.cs
@@ -0,0 +1,46 @@
+// Copyright 2024 Deepgram .NET SDK contributors. All Rights Reserved.
+// Use of this source code is governed by a MIT license that can be found in the LICENSE file.
+// SPDX-License-Identifier: MIT
+
+using System.Text.Json;
+
+using Deepgram.Models.PreRecorded.v1;
+
+namespace PreRecorded
+{
+ class Program
+ {
+ static async Task Main(string[] args)
+ {
+ // Initialize Library with default logging
+ // Normal logging is "Info" level
+ Library.Initialize();
+
+ // Set "DEEPGRAM_API_KEY" environment variable to your Deepgram API Key
+ var deepgramClient = new PreRecordedClient();
+
+ // check to see if the file exists
+ if (!File.Exists(@"CallCenterPhoneCall.mp3"))
+ {
+ Console.WriteLine("Error: File 'CallCenterPhoneCall.mp3' not found.");
+ return;
+ }
+
+ var audioData = File.ReadAllBytes(@"CallCenterPhoneCall.mp3");
+ var response = await deepgramClient.TranscribeFile(
+ audioData,
+ new PrerecordedSchema()
+ {
+ Model = "nova-2",
+ Punctuate = true,
+ Intents = true,
+ });
+
+ Console.WriteLine(JsonSerializer.Serialize(response));
+ Console.ReadKey();
+
+ // Teardown Library
+ Library.Terminate();
+ }
+ }
+}
\ No newline at end of file
diff --git a/examples/prerecorded/sentiment/CallCenterPhoneCall.mp3 b/examples/prerecorded/sentiment/CallCenterPhoneCall.mp3
new file mode 100644
index 00000000..75de2abc
Binary files /dev/null and b/examples/prerecorded/sentiment/CallCenterPhoneCall.mp3 differ
diff --git a/examples/prerecorded/sentiment/PreRecorded.csproj b/examples/prerecorded/sentiment/PreRecorded.csproj
new file mode 100644
index 00000000..151279f7
--- /dev/null
+++ b/examples/prerecorded/sentiment/PreRecorded.csproj
@@ -0,0 +1,28 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>net6.0</TargetFramework>
+    <ImplicitUsings>enable</ImplicitUsings>
+    <Nullable>enable</Nullable>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <ProjectReference Include="..\..\..\Deepgram\Deepgram.csproj" />
+  </ItemGroup>
+
+  <ItemGroup>
+    <None Update="CallCenterPhoneCall.mp3">
+      <CopyToOutputDirectory>Always</CopyToOutputDirectory>
+    </None>
+  </ItemGroup>
+
+</Project>
diff --git a/examples/prerecorded/sentiment/Program.cs b/examples/prerecorded/sentiment/Program.cs
new file mode 100644
index 00000000..f47ef6f8
--- /dev/null
+++ b/examples/prerecorded/sentiment/Program.cs
@@ -0,0 +1,47 @@
+// Copyright 2024 Deepgram .NET SDK contributors. All Rights Reserved.
+// Use of this source code is governed by a MIT license that can be found in the LICENSE file.
+// SPDX-License-Identifier: MIT
+
+using System.Text.Json;
+
+using Deepgram.Models.PreRecorded.v1;
+
+namespace PreRecorded
+{
+ class Program
+ {
+ static async Task Main(string[] args)
+ {
+ // Initialize Library with default logging
+ // Normal logging is "Info" level
+ Library.Initialize();
+
+ // Set "DEEPGRAM_API_KEY" environment variable to your Deepgram API Key
+ var deepgramClient = new PreRecordedClient();
+
+ // check to see if the file exists
+ if (!File.Exists(@"CallCenterPhoneCall.mp3"))
+ {
+ Console.WriteLine("Error: File 'CallCenterPhoneCall.mp3' not found.");
+ return;
+ }
+
+ var audioData = File.ReadAllBytes(@"CallCenterPhoneCall.mp3");
+ var response = await deepgramClient.TranscribeFile(
+ audioData,
+ new PrerecordedSchema()
+ {
+ Model = "nova-2",
+ Punctuate = true,
+ Utterances = true,
+ Sentiment = true,
+ });
+
+ Console.WriteLine(JsonSerializer.Serialize(response));
+ Console.ReadKey();
+
+ // Teardown Library
+ Library.Terminate();
+ }
+ }
+}
\ No newline at end of file
diff --git a/examples/prerecorded/summary/CallCenterPhoneCall.mp3 b/examples/prerecorded/summary/CallCenterPhoneCall.mp3
new file mode 100644
index 00000000..75de2abc
Binary files /dev/null and b/examples/prerecorded/summary/CallCenterPhoneCall.mp3 differ
diff --git a/examples/prerecorded/summary/PreRecorded.csproj b/examples/prerecorded/summary/PreRecorded.csproj
new file mode 100644
index 00000000..151279f7
--- /dev/null
+++ b/examples/prerecorded/summary/PreRecorded.csproj
@@ -0,0 +1,28 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>net6.0</TargetFramework>
+    <ImplicitUsings>enable</ImplicitUsings>
+    <Nullable>enable</Nullable>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <ProjectReference Include="..\..\..\Deepgram\Deepgram.csproj" />
+  </ItemGroup>
+
+  <ItemGroup>
+    <None Update="CallCenterPhoneCall.mp3">
+      <CopyToOutputDirectory>Always</CopyToOutputDirectory>
+    </None>
+  </ItemGroup>
+
+</Project>
diff --git a/examples/prerecorded/summary/Program.cs b/examples/prerecorded/summary/Program.cs
new file mode 100644
index 00000000..8cb445c1
--- /dev/null
+++ b/examples/prerecorded/summary/Program.cs
@@ -0,0 +1,46 @@
+// Copyright 2024 Deepgram .NET SDK contributors. All Rights Reserved.
+// Use of this source code is governed by a MIT license that can be found in the LICENSE file.
+// SPDX-License-Identifier: MIT
+
+using System.Text.Json;
+
+using Deepgram.Models.PreRecorded.v1;
+
+namespace PreRecorded
+{
+ class Program
+ {
+ static async Task Main(string[] args)
+ {
+ // Initialize Library with default logging
+ // Normal logging is "Info" level
+ Library.Initialize();
+
+ // Set "DEEPGRAM_API_KEY" environment variable to your Deepgram API Key
+ var deepgramClient = new PreRecordedClient();
+
+ // check to see if the file exists
+ if (!File.Exists(@"CallCenterPhoneCall.mp3"))
+ {
+ Console.WriteLine("Error: File 'CallCenterPhoneCall.mp3' not found.");
+ return;
+ }
+
+ var audioData = File.ReadAllBytes(@"CallCenterPhoneCall.mp3");
+ var response = await deepgramClient.TranscribeFile(
+ audioData,
+ new PrerecordedSchema()
+ {
+ Model = "nova-2",
+ Punctuate = true,
+ Summarize = "v2",
+ });
+
+ Console.WriteLine(JsonSerializer.Serialize(response));
+ Console.ReadKey();
+
+ // Teardown Library
+ Library.Terminate();
+ }
+ }
+}
\ No newline at end of file
diff --git a/examples/prerecorded/topic/CallCenterPhoneCall.mp3 b/examples/prerecorded/topic/CallCenterPhoneCall.mp3
new file mode 100644
index 00000000..75de2abc
Binary files /dev/null and b/examples/prerecorded/topic/CallCenterPhoneCall.mp3 differ
diff --git a/examples/prerecorded/topic/PreRecorded.csproj b/examples/prerecorded/topic/PreRecorded.csproj
new file mode 100644
index 00000000..151279f7
--- /dev/null
+++ b/examples/prerecorded/topic/PreRecorded.csproj
@@ -0,0 +1,28 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>net6.0</TargetFramework>
+    <ImplicitUsings>enable</ImplicitUsings>
+    <Nullable>enable</Nullable>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <ProjectReference Include="..\..\..\Deepgram\Deepgram.csproj" />
+  </ItemGroup>
+
+  <ItemGroup>
+    <None Update="CallCenterPhoneCall.mp3">
+      <CopyToOutputDirectory>Always</CopyToOutputDirectory>
+    </None>
+  </ItemGroup>
+
+</Project>
diff --git a/examples/prerecorded/topic/Program.cs b/examples/prerecorded/topic/Program.cs
new file mode 100644
index 00000000..d5b9914b
--- /dev/null
+++ b/examples/prerecorded/topic/Program.cs
@@ -0,0 +1,46 @@
+// Copyright 2024 Deepgram .NET SDK contributors. All Rights Reserved.
+// Use of this source code is governed by a MIT license that can be found in the LICENSE file.
+// SPDX-License-Identifier: MIT
+
+using System.Text.Json;
+
+using Deepgram.Models.PreRecorded.v1;
+
+namespace PreRecorded
+{
+ class Program
+ {
+ static async Task Main(string[] args)
+ {
+ // Initialize Library with default logging
+ // Normal logging is "Info" level
+ Library.Initialize();
+
+ // Set "DEEPGRAM_API_KEY" environment variable to your Deepgram API Key
+ var deepgramClient = new PreRecordedClient();
+
+ // check to see if the file exists
+ if (!File.Exists(@"CallCenterPhoneCall.mp3"))
+ {
+ Console.WriteLine("Error: File 'CallCenterPhoneCall.mp3' not found.");
+ return;
+ }
+
+ var audioData = File.ReadAllBytes(@"CallCenterPhoneCall.mp3");
+ var response = await deepgramClient.TranscribeFile(
+ audioData,
+ new PrerecordedSchema()
+ {
+ Model = "nova-2",
+ Punctuate = true,
+ Topics = true,
+ });
+
+ Console.WriteLine(JsonSerializer.Serialize(response));
+ Console.ReadKey();
+
+ // Teardown Library
+ Library.Terminate();
+ }
+ }
+}
\ No newline at end of file
diff --git a/examples/prerecorded/file/Prerecorded.csproj b/examples/prerecorded/url/PreRecorded.csproj
similarity index 95%
rename from examples/prerecorded/file/Prerecorded.csproj
rename to examples/prerecorded/url/PreRecorded.csproj
index 4721fb83..b1473617 100644
--- a/examples/prerecorded/file/Prerecorded.csproj
+++ b/examples/prerecorded/url/PreRecorded.csproj
@@ -1,22 +1,22 @@
-<Project Sdk="Microsoft.NET.Sdk">
-
-  <PropertyGroup>
-    <OutputType>Exe</OutputType>
-    <TargetFramework>net6.0</TargetFramework>
-    <ImplicitUsings>enable</ImplicitUsings>
-    <Nullable>enable</Nullable>
-  </PropertyGroup>
-
-  <ItemGroup>
-    <ProjectReference Include="..\..\..\Deepgram\Deepgram.csproj" />
-  </ItemGroup>
-
-</Project>
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>net6.0</TargetFramework>
+    <ImplicitUsings>enable</ImplicitUsings>
+    <Nullable>enable</Nullable>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <ProjectReference Include="..\..\..\Deepgram\Deepgram.csproj" />
+  </ItemGroup>
+
+</Project>
diff --git a/examples/prerecorded/url/Program.cs b/examples/prerecorded/url/Program.cs
new file mode 100644
index 00000000..b82b131a
--- /dev/null
+++ b/examples/prerecorded/url/Program.cs
@@ -0,0 +1,36 @@
+// Copyright 2024 Deepgram .NET SDK contributors. All Rights Reserved.
+// Use of this source code is governed by a MIT license that can be found in the LICENSE file.
+// SPDX-License-Identifier: MIT
+
+using System.Text.Json;
+
+using Deepgram.Models.PreRecorded.v1;
+
+namespace PreRecorded
+{
+ class Program
+ {
+ static async Task Main(string[] args)
+ {
+ // Initialize Library with default logging
+ // Normal logging is "Info" level
+ Library.Initialize();
+
+ // Set "DEEPGRAM_API_KEY" environment variable to your Deepgram API Key
+ var deepgramClient = new PreRecordedClient();
+
+ var response = await deepgramClient.TranscribeUrl(
+ new UrlSource("https://static.deepgram.com/examples/Bueller-Life-moves-pretty-fast.wav"),
+ new PrerecordedSchema()
+ {
+ Model = "nova-2",
+ });
+
+ Console.WriteLine(JsonSerializer.Serialize(response));
+ Console.ReadKey();
+
+ // Teardown Library
+ Library.Terminate();
+ }
+ }
+}
\ No newline at end of file
diff --git a/examples/speak/file/woodchuck/Program.cs b/examples/speak/file/woodchuck/Program.cs
new file mode 100644
index 00000000..0297caf2
--- /dev/null
+++ b/examples/speak/file/woodchuck/Program.cs
@@ -0,0 +1,39 @@
+// Copyright 2024 Deepgram .NET SDK contributors. All Rights Reserved.
+// Use of this source code is governed by a MIT license that can be found in the LICENSE file.
+// SPDX-License-Identifier: MIT
+
+using System.Text.Json;
+
+using Deepgram;
+using Deepgram.Models.Speak.v1;
+
+namespace SampleApp
+{
+ class Program
+ {
+ static async Task Main(string[] args)
+ {
+ // Initialize Library with default logging
+ // Normal logging is "Info" level
+ Library.Initialize();
+
+ // Set "DEEPGRAM_API_KEY" environment variable to your Deepgram API Key
+ var deepgramClient = new SpeakClient();
+
+ var response = await deepgramClient.ToFile(
+ new TextSource("How much wood could a woodchuck chuck? If a woodchuck could chuck wood? As much wood as a woodchuck could chuck, if a woodchuck could chuck wood."),
+ "test.mp3",
+ new SpeakSchema()
+ {
+ Model = "aura-asteria-en",
+ });
+
+ //Console.WriteLine(response);
+ Console.WriteLine(JsonSerializer.Serialize(response));
+ Console.ReadKey();
+
+ // Teardown Library
+ Library.Terminate();
+ }
+ }
+}
\ No newline at end of file
diff --git a/examples/speak/file/woodchuck/Speak.csproj b/examples/speak/file/woodchuck/Speak.csproj
new file mode 100644
index 00000000..85435d70
--- /dev/null
+++ b/examples/speak/file/woodchuck/Speak.csproj
@@ -0,0 +1,15 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>net6.0</TargetFramework>
+    <ImplicitUsings>enable</ImplicitUsings>
+    <Nullable>enable</Nullable>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <ProjectReference Include="..\..\..\..\Deepgram\Deepgram.csproj" />
+  </ItemGroup>
+
+</Project>
diff --git a/tests/edge_cases/keepalive/KeepAlive.csproj b/tests/edge_cases/keepalive/KeepAlive.csproj
new file mode 100644
index 00000000..c1f1063c
--- /dev/null
+++ b/tests/edge_cases/keepalive/KeepAlive.csproj
@@ -0,0 +1,18 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>net6.0</TargetFramework>
+    <ImplicitUsings>enable</ImplicitUsings>
+    <Nullable>enable</Nullable>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <ProjectReference Include="..\..\..\Deepgram\Deepgram.csproj" />
+  </ItemGroup>
+
+</Project>
diff --git a/tests/edge_cases/keepalive/Program.cs b/tests/edge_cases/keepalive/Program.cs
new file mode 100644
index 00000000..cbc4c3ae
--- /dev/null
+++ b/tests/edge_cases/keepalive/Program.cs
@@ -0,0 +1,77 @@
+// Copyright 2024 Deepgram .NET SDK contributors. All Rights Reserved.
+// Use of this source code is governed by a MIT license that can be found in the LICENSE file.
+// SPDX-License-Identifier: MIT
+
+using Deepgram;
+using Deepgram.Models.Authenticate.v1;
+using Deepgram.Models.Live.v1;
+using Deepgram.Logger;
+
+namespace SampleApp
+{
+ class Program
+ {
+ static async Task Main(string[] args)
+ {
+ // Initialize Library with default logging
+ // Normal logging is "Info" level
+ Library.Initialize();
+ // OR very chatty logging
+ //Library.Initialize(LogLevel.Debug); // LogLevel.Default, LogLevel.Debug, LogLevel.Verbose
+
+ Console.WriteLine("\n\nPress any key to stop and exit...\n\n\n");
+
+ // Set "DEEPGRAM_API_KEY" environment variable to your Deepgram API Key
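+ // The final constructor argument is assumed (per this test's purpose) to enable KeepAlive, keeping the websocket open while no audio is sent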
+ DeepgramWsClientOptions options = new DeepgramWsClientOptions(null, null, true);
+ var liveClient = new LiveClient("", options);
+
+ // Subscribe to the EventResponseReceived event
+ liveClient._openReceived += (sender, e) =>
+ {
+ Console.WriteLine($"{e.Type} received");
+ };
+ liveClient._resultsReceived += (sender, e) =>
+ {
+ if (e.Channel.Alternatives[0].Transcript == "")
+ {
+ return;
+ }
+
+ // Console.WriteLine("Transcription received: " + JsonSerializer.Serialize(e.Transcription));
+ Console.WriteLine($"\n\n\nSpeaker: {e.Channel.Alternatives[0].Transcript}\n\n\n");
+ };
+ liveClient._closeReceived += (sender, e) =>
+ {
+ Console.WriteLine($"{e.Type} received");
+ };
+ liveClient._errorReceived += (sender, e) =>
+ {
+ Console.WriteLine($"{e.Type} received. Error: {e.Message}");
+ };
+
+ // Start the connection
+ var liveSchema = new LiveSchema()
+ {
+ Model = "nova-2",
+ Encoding = "linear16",
+ SampleRate = 16000,
+ Punctuate = true,
+ SmartFormat = true,
+ };
+ await liveClient.Connect(liveSchema);
+
+ // Wait for the user to press a key
+ Console.WriteLine("\n\nWe are intentionally waiting here to test the KeepAlive functionality...");
+ Console.WriteLine("Press any key to stop and exit...");
+ Console.ReadKey();
+
+ // Stop the connection
+ await liveClient.Stop();
+
+ // Dispose the client
+ liveClient.Dispose();
+
+ // Terminate Libraries
+ Library.Terminate();
+ }
+ }
+}
diff --git a/tests/edge_cases/reconnect_same_object/Program.cs b/tests/edge_cases/reconnect_same_object/Program.cs
new file mode 100644
index 00000000..17e2e26f
--- /dev/null
+++ b/tests/edge_cases/reconnect_same_object/Program.cs
@@ -0,0 +1,118 @@
+// Copyright 2024 Deepgram .NET SDK contributors. All Rights Reserved.
+// Use of this source code is governed by a MIT license that can be found in the LICENSE file.
+// SPDX-License-Identifier: MIT
+
+using System.Text.Json;
+
+using Deepgram.Models.Authenticate.v1;
+using Deepgram.Models.Live.v1;
+using Deepgram.Logger;
+using Deepgram.Microphone;
+using System.Threading;
+
+namespace SampleApp
+{
+ class Program
+ {
+ static async Task Main(string[] args)
+ {
+ // Initialize Library with default logging
+ // Normal logging is "Info" level
+ //Deepgram.Library.Initialize();
+ // OR very chatty logging
+ Deepgram.Library.Initialize(LogLevel.Debug); // LogLevel.Default, LogLevel.Debug, LogLevel.Verbose
+ Deepgram.Microphone.Library.Initialize();
+
+ Console.WriteLine("\n\nPress any key to stop and exit...\n\n\n");
+
+ // Set "DEEPGRAM_API_KEY" environment variable to your Deepgram API Key
+ DeepgramWsClientOptions options = new DeepgramWsClientOptions(null, null, true);
+ var liveClient = new LiveClient("", options);
+ // OR
+ //var liveClient = new LiveClient("set your DEEPGRAM_API_KEY here");
+
+ // Subscribe to the EventResponseReceived event
+ liveClient._openReceived += (sender, e) =>
+ {
+ Console.WriteLine($"{e.Type} received");
+ };
+ liveClient._metadataReceived += (sender, e) =>
+ {
+ Console.WriteLine($"{e.Type} received: {JsonSerializer.Serialize(e)}");
+ };
+ liveClient._resultsReceived += (sender, e) =>
+ {
+ if (e.Channel.Alternatives[0].Transcript == "")
+ {
+ return;
+ }
+
+ // Console.WriteLine("Transcription received: " + JsonSerializer.Serialize(e.Transcription));
+ Console.WriteLine($"\n\n\nSpeaker: {e.Channel.Alternatives[0].Transcript}\n\n\n");
+ };
+ liveClient._speechStartedReceived += (sender, e) =>
+ {
+ Console.WriteLine($"{e.Type} received");
+ };
+ liveClient._utteranceEndReceived += (sender, e) =>
+ {
+ Console.WriteLine($"{e.Type} received");
+ };
+ liveClient._closeReceived += (sender, e) =>
+ {
+ Console.WriteLine($"{e.Type} received");
+ };
+ liveClient._unhandledReceived += (sender, e) =>
+ {
+ Console.WriteLine($"{e.Type} received. Raw: {e.Raw}");
+ };
+ liveClient._errorReceived += (sender, e) =>
+ {
+ Console.WriteLine($"{e.Type} received. Error: {e.Message}");
+ };
+
+ // my own cancellation token
+ //var cancellationToken = new CancellationTokenSource();
+
+ // Start the connection
+ var liveSchema = new LiveSchema()
+ {
+ Model = "nova-2",
+ Encoding = "linear16",
+ SampleRate = 16000,
+ Punctuate = true,
+ SmartFormat = true,
+ InterimResults = true,
+ UtteranceEnd = "1000",
+ VadEvents = true,
+ };
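+ // InterimResults streams partial transcripts as they form; UtteranceEnd (a gap in milliseconds) and VadEvents surface end-of-speech and speech-start events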
+ //await liveClient.Connect(liveSchema, cancellationToken);
+ await liveClient.Connect(liveSchema);
+
+ // Microphone streaming
+ var microphone = new Microphone(liveClient.Send);
+ microphone.Start();
+
+ // Wait for the user to press a key
+ Console.ReadKey();
+
+ Console.WriteLine("Stopping the microphone streaming...");
+ microphone.Stop();
+
+ //// START: test an external cancellation
+ //cancellationToken.Cancel();
+ //Thread.Sleep(10000); // wait 10 seconds to cancel externally
+ //// END: test an external cancellation
+
+ // Stop the connection
+ await liveClient.Stop();
+
+ // Dispose the client
+ liveClient.Dispose();
+
+ // Terminate Libraries
+ Deepgram.Microphone.Library.Terminate();
+ Deepgram.Library.Terminate();
+ }
+ }
+}
diff --git a/tests/edge_cases/reconnect_same_object/ReconnectStreaming.csproj b/tests/edge_cases/reconnect_same_object/ReconnectStreaming.csproj
new file mode 100644
index 00000000..c1f1063c
--- /dev/null
+++ b/tests/edge_cases/reconnect_same_object/ReconnectStreaming.csproj
@@ -0,0 +1,18 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>net6.0</TargetFramework>
+    <ImplicitUsings>enable</ImplicitUsings>
+    <Nullable>enable</Nullable>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <ProjectReference Include="..\..\..\Deepgram\Deepgram.csproj" />
+  </ItemGroup>
+
+</Project>
diff --git a/tests/expected_failures/exercise_timeout/ExerciseTimeout.csproj b/tests/expected_failures/exercise_timeout/ExerciseTimeout.csproj
new file mode 100644
index 00000000..c1f1063c
--- /dev/null
+++ b/tests/expected_failures/exercise_timeout/ExerciseTimeout.csproj
@@ -0,0 +1,18 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>net6.0</TargetFramework>
+    <ImplicitUsings>enable</ImplicitUsings>
+    <Nullable>enable</Nullable>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <ProjectReference Include="..\..\..\Deepgram\Deepgram.csproj" />
+  </ItemGroup>
+
+</Project>
diff --git a/tests/expected_failures/exercise_timeout/Program.cs b/tests/expected_failures/exercise_timeout/Program.cs
new file mode 100644
index 00000000..e1fb5a7c
--- /dev/null
+++ b/tests/expected_failures/exercise_timeout/Program.cs
@@ -0,0 +1,72 @@
+// Copyright 2024 Deepgram .NET SDK contributors. All Rights Reserved.
+// Use of this source code is governed by a MIT license that can be found in the LICENSE file.
+// SPDX-License-Identifier: MIT
+
+using Deepgram;
+using Deepgram.Models.Live.v1;
+using Deepgram.Logger;
+
+namespace SampleApp
+{
+ class Program
+ {
+ static async Task Main(string[] args)
+ {
+ // Normal logging is "Info" level
+ Library.Initialize();
+ // OR very chatty logging
+ //Library.Initialize(LogLevel.Debug); // LogLevel.Default, LogLevel.Debug, LogLevel.Verbose
+
+ // Set "DEEPGRAM_API_KEY" environment variable to your Deepgram API Key
+ var liveClient = new LiveClient();
+
+ // Subscribe to the EventResponseReceived event
+ liveClient._openReceived += (sender, e) =>
+ {
+ Console.WriteLine($"{e.Type} received");
+ };
+ liveClient._resultsReceived += (sender, e) =>
+ {
+ if (e.Channel.Alternatives[0].Transcript == "")
+ {
+ return;
+ }
+
+ // Console.WriteLine("Transcription received: " + JsonSerializer.Serialize(e.Transcription));
+ Console.WriteLine($"\n\n\nSpeaker: {e.Channel.Alternatives[0].Transcript}\n\n\n");
+ };
+ liveClient._closeReceived += (sender, e) =>
+ {
+ Console.WriteLine($"{e.Type} received");
+ };
+ liveClient._errorReceived += (sender, e) =>
+ {
+ Console.WriteLine($"{e.Type} received. Error: {e.Message}");
+ };
+
+ // Start the connection
+ var liveSchema = new LiveSchema()
+ {
+ Model = "nova-2",
+ Encoding = "linear16",
+ SampleRate = 16000,
+ Punctuate = true,
+ SmartFormat = true,
+ };
+ await liveClient.Connect(liveSchema);
+
+ // Wait for 20 seconds
+ Console.WriteLine("\n\nWe are intentionally looking to timeout here to exercise the functionality.");
+ Console.WriteLine("This should take less than 20 seconds to timeout.");
+ Thread.Sleep(20000);
+
+ // Stop the connection
+ await liveClient.Stop();
+
+ // Dispose the client
+ liveClient.Dispose();
+
+ // Terminate Libraries
+ Library.Terminate();
+ }
+ }
+}