
Commit a6df8ea

Features/feedback planner (#2)

* add: feedback and repeat option for PlannedTeam instead of stop_callback; new unit tests; VS Code formatter settings
* fix: make feedback variable name configurable
* add: note to support plan resuming
* update: README to include semver logo; fix broken tests
* add: refine notebook comments and docs
* fix: run test workflow only when code changes; improve README
1 parent a9603f3 commit a6df8ea
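The headline change swaps PlannedTeam's stop_callback for a feedback-and-repeat loop: the team plans up front, executes the plan, and can repeat it with feedback until the result is accepted. Below is a minimal sketch of what the new call shape might look like; the feedback-related parameter names (`include_feedback`, `feedback_variable`) and the `planned_team` module path are assumptions inferred from the commit message, not confirmed API — the authoritative example lives in the un-rendered planned_team_feedback.ipynb.

```python
import os

from vanilla_aiagents.llm import AzureOpenAILLM
from vanilla_aiagents.agent import Agent
from vanilla_aiagents.workflow import Workflow
from vanilla_aiagents.planned_team import PlannedTeam  # module path assumed

# LLM configured from environment variables, mirroring the test setup in this commit
llm = AzureOpenAILLM(
    {
        "azure_deployment": os.getenv("AZURE_OPENAI_MODEL"),
        "azure_endpoint": os.getenv("AZURE_OPENAI_ENDPOINT"),
        "api_key": os.getenv("AZURE_OPENAI_KEY"),
        "api_version": os.getenv("AZURE_OPENAI_API_VERSION"),
    }
)

collector = Agent(id="collector", llm=llm, description="Collects requirements",
                  system_message="Gather the user's requirements.")
responder = Agent(id="responder", llm=llm, description="Writes the final answer",
                  system_message="Produce the final response.")

# Hypothetical parameter names: the commit only says the feedback variable
# name is configurable and that feedback/repeat replaces stop_callback.
flow = PlannedTeam(
    id="flow",
    description="",
    members=[collector, responder],
    llm=llm,
    include_feedback=True,          # assumed flag: repeat the plan on feedback
    feedback_variable="feedback",   # assumed: configurable per the commit message
)
workflow = Workflow(askable=flow)
workflow.run("Which steps are needed to answer my question?")
```

The notebook diff below confirms the removal of stop_callback from the PlannedTeam call: a planned team now finishes when its plan completes (or repeats via feedback) rather than when a message-count callback fires.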

File tree: 10 files changed (+1006 -181 lines)


.github/workflows/pytest.yml (+4)

```diff
@@ -4,7 +4,11 @@ on:
   push:
     branches:
       - main
+    paths:
+      - 'vanilla_aiagents/**'
   pull_request:
+    paths:
+      - 'vanilla_aiagents/**'
   workflow_dispatch:
 
 jobs:
```

.vscode/settings.json (+9 -1)

```diff
@@ -1,3 +1,11 @@
 {
-    "python.terminal.activateEnvironment": true
+    "python.terminal.activateEnvironment": true,
+    "editor.defaultFormatter": "ms-python.black-formatter",
+    "editor.formatOnSave": true,
+    "[python]": {
+        "editor.codeActionsOnSave": {
+            "source.organizeImports": true,
+            "source.unusedImports": "always"
+        }
+    }
 }
```

README.md (+8 -6)

```diff
@@ -3,10 +3,11 @@
 
 <h3 align="center">Vanilla AI Agents</h3>
 
-<a href="https://github.com/azure-samples/vanilla-aiagents/main/LICENSE.md"><img src="https://img.shields.io/github/license/Azure-Samples/vanilla-aiagents" alt="License" /></a>
-<a href="https://github.com/Azure-Samples/vanilla-aiagents/actions/workflows/pytest.yml"><img src="https://github.com/Azure-Samples/vanilla-aiagents/actions/workflows/pytest.yml/badge.svg" alt="Test status" /></a>
-<a href="https://github.com/Azure-Samples/vanilla-aiagents/releases"><img src="https://img.shields.io/github/v/release/Azure-Samples/vanilla-aiagents" alt="Releases" /></a>
-<a href="https://Azure-Samples.github.io/vanilla-aiagents/"><img src="https://img.shields.io/badge/GitHub%20Pages-Online-success" alt="GitHub Pages" /></a>
+<a href="https://github.com/azure-samples/vanilla-aiagents/main/LICENSE.md"><img src="https://img.shields.io/github/license/Azure-Samples/vanilla-aiagents" alt="License" /></a>
+<a href="https://github.com/Azure-Samples/vanilla-aiagents/actions/workflows/pytest.yml"><img src="https://github.com/Azure-Samples/vanilla-aiagents/actions/workflows/pytest.yml/badge.svg" alt="Test status" /></a>
+<a href="https://github.com/Azure-Samples/vanilla-aiagents/releases"><img src="https://img.shields.io/github/v/release/Azure-Samples/vanilla-aiagents" alt="Releases" /></a>
+<a href="https://Azure-Samples.github.io/vanilla-aiagents/"><img src="https://img.shields.io/badge/GitHub%20Pages-Online-success" alt="GitHub Pages" /></a>
+<a href="https://semver.org/"><img src="https://img.shields.io/badge/semver-2.0.0-blue" alt="Semver 2.0.0" /></a>
 
 <p>Lightweight library demonstrating how to create agenting application without using any specific framework.</p>
 </div>
```

```diff
@@ -25,8 +26,9 @@
 
 This project framework provides the following features:
 
-- Multi-agent chat
-- Agent routing (including option to look for available tools to decide)
+- Multi-agent chat with several orchestration options
+- Dynamic routing (including option to look for available tools to decide)
+- Beforehand planning with optional repetition via feedback loop
 - Agent state management
 - Custom stop conditions
 - Interactive or unattended user input
```
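The "Custom stop conditions" feature maps to the stop_callback hook, which this commit keeps on Team while dropping it from PlannedTeam. A minimal sketch of the pattern as it appears in the updated tests, reusing the `llm` and agent setup from the sketch above:

```python
from vanilla_aiagents.team import Team
from vanilla_aiagents.workflow import Workflow

# Custom stop condition: end the team chat once the conversation holds
# more than two messages (same callback as in test_llm.py below).
flow = Team(
    id="flow",
    description="",
    members=[collector, responder],  # Agent instances from the earlier sketch
    llm=llm,                         # AzureOpenAILLM from the earlier sketch
    stop_callback=lambda messages: len(messages) > 2,
)
workflow = Workflow(askable=flow)
workflow.run("Which channel is this conversation on?")
```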

notebooks/planned_team.ipynb (+1 -2)

```diff
@@ -116,8 +116,7 @@
 "from vanilla_aiagents.conversation import SummarizeMessagesStrategy\n",
 "flow = PlannedTeam(id=\"flow\", description=\"\", \n",
 "    members=[collector, approver, responder], \n",
-"    llm=llm, \n",
-"    stop_callback=lambda msgs: len(msgs) > 6, \n",
+"    llm=llm,\n",
 "    fork_conversation=True,\n",
 "    fork_strategy=SummarizeMessagesStrategy(llm, \"Summarize the conversation, provide a list of key information, decisions taken and eventual outcomes.\"))\n",
 "workflow = Workflow(askable=flow)"
```

notebooks/planned_team_feedback.ipynb (+444)

Large diffs are not rendered by default.

vanilla_aiagents/tests/test_llm.py (+120 -34)

```diff
@@ -1,27 +1,33 @@
 import unittest
 import os, logging, sys
 
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+from pydantic import BaseModel
+
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
 
 from vanilla_aiagents.workflow import Workflow
 from vanilla_aiagents.agent import Agent
 from vanilla_aiagents.sequence import Sequence
-from vanilla_aiagents.llm import AzureOpenAILLM
+from vanilla_aiagents.llm import AzureOpenAILLM, LLMConstraints
 from vanilla_aiagents.team import Team
 
 from dotenv import load_dotenv
+
 load_dotenv(override=True)
 
+
 class TestLLM(unittest.TestCase):
 
     def setUp(self):
-        self.llm = AzureOpenAILLM({
-            "azure_deployment": os.getenv("AZURE_OPENAI_MODEL"),
-            "azure_endpoint": os.getenv("AZURE_OPENAI_ENDPOINT"),
-            "api_key": os.getenv("AZURE_OPENAI_KEY"),
-            "api_version": os.getenv("AZURE_OPENAI_API_VERSION"),
-        })
-
+        self.llm = AzureOpenAILLM(
+            {
+                "azure_deployment": os.getenv("AZURE_OPENAI_MODEL"),
+                "azure_endpoint": os.getenv("AZURE_OPENAI_ENDPOINT"),
+                "api_key": os.getenv("AZURE_OPENAI_KEY"),
+                "api_version": os.getenv("AZURE_OPENAI_API_VERSION"),
+            }
+        )
+
         # Set logging to debug for Agent, User, and Workflow
         logging.basicConfig(level=logging.INFO)
         # logging.getLogger("vanilla_aiagents.agent").setLevel(logging.DEBUG)
```

```diff
@@ -30,60 +36,140 @@ def setUp(self):
 
     def test_metrics_sequence(self):
         # Telling the agents to set context variables implies calling a pre-defined function call
-        first = Agent(id="first", llm=self.llm, description="First agent", system_message = """You are part of an AI process
+        first = Agent(
+            id="first",
+            llm=self.llm,
+            description="First agent",
+            system_message="""You are part of an AI process
 Your task is to set a context for the next agent to continue the conversation.
 
 DO set context variable "CHANNEL" to "voice" and "LANGUAGE" to "en"
 
 DO respond only "Context set" when you are done.
-        """)
+        """,
+        )
 
         # Second agent might have its system message extended automatically with the context from the ongoing conversation
-        second = Agent(id="second", llm=self.llm, description="Second agent", system_message = """You are part of an AI process
+        second = Agent(
+            id="second",
+            llm=self.llm,
+            description="Second agent",
+            system_message="""You are part of an AI process
 Your task is to continue the conversation based on the context set by the previous agent.
 When asked, you can use variable provide in CONTEXT to generate the response.
 
 --- CONTEXT ---
 __context__
-        """)
-
+        """,
+        )
+
         flow = Sequence(id="flow", description="", steps=[first, second], llm=self.llm)
         workflow = Workflow(askable=flow)
-
+
         workflow.run("Which channel is this conversation on?")
-
-        self.assertGreater(workflow.conversation.metrics.completion_tokens, 0, "Expected completion_tokens to be greater than 0")
-        self.assertGreater(workflow.conversation.metrics.prompt_tokens, 0, "Expected prompt_tokens to be greater than 0")
-        self.assertGreater(workflow.conversation.metrics.total_tokens, 0, "Expected total_tokens to be greater than 0")
-
+
+        self.assertGreater(
+            workflow.conversation.metrics.completion_tokens,
+            0,
+            "Expected completion_tokens to be greater than 0",
+        )
+        self.assertGreater(
+            workflow.conversation.metrics.prompt_tokens,
+            0,
+            "Expected prompt_tokens to be greater than 0",
+        )
+        self.assertGreater(
+            workflow.conversation.metrics.total_tokens,
+            0,
+            "Expected total_tokens to be greater than 0",
+        )
+
     def test_metrics_team(self):
         # Telling the agents to set context variables implies calling a pre-defined function call
-        first = Agent(id="first", llm=self.llm, description="First agent", system_message = """You are part of an AI process
+        first = Agent(
+            id="first",
+            llm=self.llm,
+            description="First agent",
+            system_message="""You are part of an AI process
 Your task is to set a context for the next agent to continue the conversation.
 
 DO set context variable "CHANNEL" to "voice" and "LANGUAGE" to "en"
 
 DO respond only "Context set" when you are done.
-        """)
+        """,
+        )
 
         # Second agent might have its system message extended automatically with the context from the ongoing conversation
-        second = Agent(id="second", llm=self.llm, description="Second agent", system_message = """You are part of an AI process
+        second = Agent(
+            id="second",
+            llm=self.llm,
+            description="Second agent",
+            system_message="""You are part of an AI process
 Your task is to continue the conversation based on the context set by the previous agent.
 When asked, you can use variable provide in CONTEXT to generate the response.
 
 --- CONTEXT ---
 __context__
-        """)
-
-        flow = Team(id="flow", description="", members=[first, second], llm=self.llm, stop_callback=lambda x: len(x) > 2)
+        """,
+        )
+
+        flow = Team(
+            id="flow",
+            description="",
+            members=[first, second],
+            llm=self.llm,
+            stop_callback=lambda x: len(x) > 2,
+        )
         workflow = Workflow(askable=flow)
-
-        workflow.run("Which channel is this conversation on?")
-
-        self.assertGreater(workflow.conversation.metrics.completion_tokens, 0, "Expected completion_tokens to be greater than 0")
-        self.assertGreater(workflow.conversation.metrics.prompt_tokens, 0, "Expected prompt_tokens to be greater than 0")
-        self.assertGreater(workflow.conversation.metrics.total_tokens, 0, "Expected total_tokens to be greater than 0")
 
+        workflow.run("Which channel is this conversation on?")
 
-if __name__ == '__main__':
-    unittest.main()
+        self.assertGreater(
+            workflow.conversation.metrics.completion_tokens,
+            0,
+            "Expected completion_tokens to be greater than 0",
+        )
+        self.assertGreater(
+            workflow.conversation.metrics.prompt_tokens,
+            0,
+            "Expected prompt_tokens to be greater than 0",
+        )
+        self.assertGreater(
+            workflow.conversation.metrics.total_tokens,
+            0,
+            "Expected total_tokens to be greater than 0",
+        )
+
+    def test_constraints(self):
+        llm2 = AzureOpenAILLM(
+            {
+                "azure_deployment": "o1-mini",
+                "azure_endpoint": os.getenv("AZURE_OPENAI_ENDPOINT"),
+                "api_key": os.getenv("AZURE_OPENAI_KEY"),
+                "api_version": os.getenv("AZURE_OPENAI_API_VERSION"),
+            },
+            constraints=LLMConstraints(
+                temperature=1, structured_output=False, system_message=False
+            ),
+        )
+
+        class HelloResponse(BaseModel):
+            response: str
+
+        response, metrics = llm2.ask(
+            messages=[
+                {
+                    "role": "system",
+                    "content": """Say hello with JSON format {response: "message"}""",
+                }
+            ],
+            response_format=HelloResponse,
+            temperature=0.7,
+        )
+
+        # If this call passes, then the constraints are working
+        self.assertIsNotNone(response, "Expected response to be not None")
+
+
+if __name__ == "__main__":
+    unittest.main()
```
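The new test_constraints case is the substantive addition here: LLMConstraints declares limits (fixed temperature, no structured output, no system messages) for deployments like o1-mini, and the wrapper is expected to reconcile caller arguments with those limits. That is why the test can still pass temperature=0.7 and a Pydantic response_format and merely assert the call returns a non-None response. A condensed standalone sketch of the same pattern, taken directly from the test above:

```python
import os

from pydantic import BaseModel
from vanilla_aiagents.llm import AzureOpenAILLM, LLMConstraints

# Constrained client: o1-mini only accepts temperature=1 and supports neither
# structured output nor system messages, so those limits are declared up front.
llm = AzureOpenAILLM(
    {
        "azure_deployment": "o1-mini",
        "azure_endpoint": os.getenv("AZURE_OPENAI_ENDPOINT"),
        "api_key": os.getenv("AZURE_OPENAI_KEY"),
        "api_version": os.getenv("AZURE_OPENAI_API_VERSION"),
    },
    constraints=LLMConstraints(
        temperature=1, structured_output=False, system_message=False
    ),
)

class HelloResponse(BaseModel):
    response: str

# The caller still requests temperature=0.7 and a structured response format;
# the constraints should override or strip whatever the model cannot handle.
response, metrics = llm.ask(
    messages=[{"role": "system", "content": 'Say hello with JSON format {response: "message"}'}],
    response_format=HelloResponse,
    temperature=0.7,
)
print(response)
```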
