1
1
import unittest
2
2
import os , logging , sys
3
3
4
- sys .path .append (os .path .abspath (os .path .join (os .path .dirname (__file__ ), '..' )))
4
+ from pydantic import BaseModel
5
+
6
+ sys .path .append (os .path .abspath (os .path .join (os .path .dirname (__file__ ), ".." )))
5
7
6
8
from vanilla_aiagents .workflow import Workflow
7
9
from vanilla_aiagents .agent import Agent
8
10
from vanilla_aiagents .sequence import Sequence
9
- from vanilla_aiagents .llm import AzureOpenAILLM
11
+ from vanilla_aiagents .llm import AzureOpenAILLM , LLMConstraints
10
12
from vanilla_aiagents .team import Team
11
13
12
14
from dotenv import load_dotenv
15
+
13
16
load_dotenv (override = True )
14
17
18
+
15
19
class TestLLM (unittest .TestCase ):
16
20
17
21
    def setUp(self):
        """Create the shared Azure OpenAI client and configure test logging.

        Connection settings are read from the environment (loaded above via
        python-dotenv): AZURE_OPENAI_MODEL, AZURE_OPENAI_ENDPOINT,
        AZURE_OPENAI_KEY and AZURE_OPENAI_API_VERSION.
        NOTE(review): os.getenv returns None for unset variables — missing
        env vars surface later as client errors, not here; confirm intended.
        """
        self.llm = AzureOpenAILLM(
            {
                "azure_deployment": os.getenv("AZURE_OPENAI_MODEL"),
                "azure_endpoint": os.getenv("AZURE_OPENAI_ENDPOINT"),
                "api_key": os.getenv("AZURE_OPENAI_KEY"),
                "api_version": os.getenv("AZURE_OPENAI_API_VERSION"),
            }
        )

        # Set logging to debug for Agent, User, and Workflow
        logging.basicConfig(level=logging.INFO)
        # logging.getLogger("vanilla_aiagents.agent").setLevel(logging.DEBUG)
@@ -30,60 +36,140 @@ def setUp(self):
30
36
31
37
    def test_metrics_sequence(self):
        """End-to-end check that a Sequence run accumulates token metrics.

        Two agents run in order: the first sets context variables, the second
        answers using that context. After workflow.run(), the conversation's
        completion_tokens, prompt_tokens and total_tokens must all be > 0.
        NOTE(review): requires a live Azure OpenAI endpoint — not hermetic.
        """
        # Telling the agents to set context variables implies calling a pre-defined function call
        first = Agent(
            id="first",
            llm=self.llm,
            description="First agent",
            system_message="""You are part of an AI process
Your task is to set a context for the next agent to continue the conversation.

DO set context variable "CHANNEL" to "voice" and "LANGUAGE" to "en"

DO respond only "Context set" when you are done.
        """,
        )

        # Second agent might have its system message extended automatically with the context from the ongoing conversation
        second = Agent(
            id="second",
            llm=self.llm,
            description="Second agent",
            system_message="""You are part of an AI process
Your task is to continue the conversation based on the context set by the previous agent.
When asked, you can use variable provide in CONTEXT to generate the response.

--- CONTEXT ---
__context__
        """,
        )

        # Run both agents strictly in order; the workflow drives the sequence.
        flow = Sequence(id="flow", description="", steps=[first, second], llm=self.llm)
        workflow = Workflow(askable=flow)

        workflow.run("Which channel is this conversation on?")

        # Any successful LLM round-trip must report non-zero token usage.
        self.assertGreater(
            workflow.conversation.metrics.completion_tokens,
            0,
            "Expected completion_tokens to be greater than 0",
        )
        self.assertGreater(
            workflow.conversation.metrics.prompt_tokens,
            0,
            "Expected prompt_tokens to be greater than 0",
        )
        self.assertGreater(
            workflow.conversation.metrics.total_tokens,
            0,
            "Expected total_tokens to be greater than 0",
        )
86
+
59
87
    def test_metrics_team(self):
        """End-to-end check that a Team run accumulates token metrics.

        Same two-agent setup as test_metrics_sequence, but orchestrated by a
        Team whose stop_callback halts after more than 2 conversation entries.
        Asserts completion/prompt/total token counters are all > 0 afterwards.
        NOTE(review): requires a live Azure OpenAI endpoint — not hermetic.
        """
        # Telling the agents to set context variables implies calling a pre-defined function call
        first = Agent(
            id="first",
            llm=self.llm,
            description="First agent",
            system_message="""You are part of an AI process
Your task is to set a context for the next agent to continue the conversation.

DO set context variable "CHANNEL" to "voice" and "LANGUAGE" to "en"

DO respond only "Context set" when you are done.
        """,
        )

        # Second agent might have its system message extended automatically with the context from the ongoing conversation
        second = Agent(
            id="second",
            llm=self.llm,
            description="Second agent",
            system_message="""You are part of an AI process
Your task is to continue the conversation based on the context set by the previous agent.
When asked, you can use variable provide in CONTEXT to generate the response.

--- CONTEXT ---
__context__
        """,
        )

        # Team decides member order dynamically; stop once the conversation
        # holds more than 2 entries so the test terminates deterministically.
        flow = Team(
            id="flow",
            description="",
            members=[first, second],
            llm=self.llm,
            stop_callback=lambda x: len(x) > 2,
        )
        workflow = Workflow(askable=flow)

        workflow.run("Which channel is this conversation on?")

        # Any successful LLM round-trip must report non-zero token usage.
        self.assertGreater(
            workflow.conversation.metrics.completion_tokens,
            0,
            "Expected completion_tokens to be greater than 0",
        )
        self.assertGreater(
            workflow.conversation.metrics.prompt_tokens,
            0,
            "Expected prompt_tokens to be greater than 0",
        )
        self.assertGreater(
            workflow.conversation.metrics.total_tokens,
            0,
            "Expected total_tokens to be greater than 0",
        )
142
+
143
    def test_constraints(self):
        """Verify LLMConstraints lets restricted models accept a normal ask().

        Builds a client for the "o1-mini" deployment with constraints that
        (per the constructor arguments) pin temperature to 1 and disable
        structured output and system messages. The ask() call then passes a
        system-role message, a response_format model and temperature=0.7 —
        inputs the constrained model would otherwise reject — so a non-None
        response implies the constraint layer adapted the request.
        NOTE(review): how LLMConstraints rewrites the request lives in
        vanilla_aiagents.llm — behavior inferred from this call site only.
        """
        llm2 = AzureOpenAILLM(
            {
                "azure_deployment": "o1-mini",
                "azure_endpoint": os.getenv("AZURE_OPENAI_ENDPOINT"),
                "api_key": os.getenv("AZURE_OPENAI_KEY"),
                "api_version": os.getenv("AZURE_OPENAI_API_VERSION"),
            },
            constraints=LLMConstraints(
                temperature=1, structured_output=False, system_message=False
            ),
        )

        # Minimal pydantic schema used as the structured-output target.
        class HelloResponse(BaseModel):
            response: str

        response, metrics = llm2.ask(
            messages=[
                {
                    "role": "system",
                    "content": """Say hello with JSON format {response: "message"}""",
                }
            ],
            response_format=HelloResponse,
            temperature=0.7,
        )

        # If this call passes, then the constraints are working
        self.assertIsNotNone(response, "Expected response to be not None")
172
+
173
+
174
# Allow running this test module directly (python test_llm.py) as well as via
# a test runner such as pytest or `python -m unittest`.
if __name__ == "__main__":
    unittest.main()
0 commit comments