Commit 6ab389f

Merge pull request #129 from jon-fox/feature/115-rename-memory-to-history-v2
Rename memory to history
2 parents 28f34fc + ad28b33 commit 6ab389f

File tree: 28 files changed, +356 −356 lines changed

README.md

Lines changed: 3 additions & 3 deletions
@@ -86,7 +86,7 @@ In Atomic Agents, an agent is composed of several key components:
 - **System Prompt:** Defines the agent's behavior and purpose.
 - **Input Schema:** Specifies the structure and validation rules for the agent's input.
 - **Output Schema:** Specifies the structure and validation rules for the agent's output.
-- **Memory:** Stores conversation history or other relevant data.
+- **History:** Stores conversation history or other relevant data.
 - **Context Providers:** Inject dynamic context into the agent's system prompt at runtime.

 Here's a high-level architecture diagram:
@@ -140,7 +140,7 @@ import instructor
 from atomic_agents.agents.base_agent import BaseAgent, BaseAgentConfig, BaseAgentInputSchema
 from atomic_agents.lib.base.base_io_schema import BaseIOSchema
 from atomic_agents.lib.components.system_prompt_generator import SystemPromptGenerator
-from atomic_agents.lib.components.agent_memory import AgentMemory
+from atomic_agents.lib.components.chat_history import ChatHistory

 # Define a custom output schema
 class CustomOutputSchema(BaseIOSchema):
@@ -173,7 +173,7 @@ agent = BaseAgent[BaseAgentInputSchema, CustomOutputSchema](
         client=client,
         model="gpt-4o-mini",
         system_prompt_generator=system_prompt_generator,
-        memory=AgentMemory(),
+        history=ChatHistory(),
     )
 )
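For downstream users the rename is mechanical: swap the import and the config field. A minimal sketch of the updated construction path, based on the README diff above; the `instructor.from_openai(openai.OpenAI())` client setup and the use of the default `BaseAgentOutputSchema` are assumptions, not part of this commit:

```python
import instructor
import openai

from atomic_agents.agents.base_agent import (
    BaseAgent,
    BaseAgentConfig,
    BaseAgentInputSchema,
    BaseAgentOutputSchema,  # default schema assumed here; the README example defines its own CustomOutputSchema
)
from atomic_agents.lib.components.chat_history import ChatHistory  # was: agent_memory import AgentMemory

# Assumed client setup; any instructor-wrapped synchronous client works the same way.
client = instructor.from_openai(openai.OpenAI())

agent = BaseAgent[BaseAgentInputSchema, BaseAgentOutputSchema](
    BaseAgentConfig(
        client=client,
        model="gpt-4o-mini",
        history=ChatHistory(),  # was: memory=AgentMemory()
    )
)
```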
atomic-agents/atomic_agents/agents/base_agent.py

Lines changed: 28 additions & 28 deletions
@@ -1,7 +1,7 @@
 import instructor
 from pydantic import BaseModel, Field
 from typing import Optional, Type, Generator, AsyncGenerator, get_args
-from atomic_agents.lib.components.agent_memory import AgentMemory
+from atomic_agents.lib.components.chat_history import ChatHistory
 from atomic_agents.lib.components.system_prompt_generator import (
     SystemPromptContextProviderBase,
     SystemPromptGenerator,
@@ -60,7 +60,7 @@ class BaseAgentOutputSchema(BaseIOSchema):
 class BaseAgentConfig(BaseModel):
     client: instructor.client.Instructor = Field(..., description="Client for interacting with the language model.")
     model: str = Field(default="gpt-4o-mini", description="The model to use for generating responses.")
-    memory: Optional[AgentMemory] = Field(default=None, description="Memory component for storing chat history.")
+    history: Optional[ChatHistory] = Field(default=None, description="History component for storing chat history.")
     system_prompt_generator: Optional[SystemPromptGenerator] = Field(
         default=None, description="Component for generating system prompts."
     )
@@ -75,7 +75,7 @@ class BaseAgent[InputSchema: BaseIOSchema, OutputSchema: BaseIOSchema]:
     """
     Base class for chat agents.

-    This class provides the core functionality for handling chat interactions, including managing memory,
+    This class provides the core functionality for handling chat interactions, including managing history,
     generating system prompts, and obtaining responses from a language model.

     Type Parameters:
@@ -85,10 +85,10 @@ class BaseAgent[InputSchema: BaseIOSchema, OutputSchema: BaseIOSchema]:
     Attributes:
         client: Client for interacting with the language model.
         model (str): The model to use for generating responses.
-        memory (AgentMemory): Memory component for storing chat history.
+        history (ChatHistory): History component for storing chat history.
         system_prompt_generator (SystemPromptGenerator): Component for generating system prompts.
         system_role (Optional[str]): The role of the system in the conversation. None means no system prompt.
-        initial_memory (AgentMemory): Initial state of the memory.
+        initial_history (ChatHistory): Initial state of the history.
         current_user_input (Optional[InputSchema]): The current user input being processed.
         model_api_parameters (dict): Additional parameters passed to the API provider.
             - Use this for parameters like 'temperature', 'max_tokens', etc.
@@ -103,18 +103,18 @@ def __init__(self, config: BaseAgentConfig):
         """
         self.client = config.client
         self.model = config.model
-        self.memory = config.memory or AgentMemory()
+        self.history = config.history or ChatHistory()
         self.system_prompt_generator = config.system_prompt_generator or SystemPromptGenerator()
         self.system_role = config.system_role
-        self.initial_memory = self.memory.copy()
+        self.initial_history = self.history.copy()
         self.current_user_input = None
         self.model_api_parameters = config.model_api_parameters or {}

-    def reset_memory(self):
+    def reset_history(self):
         """
-        Resets the memory to its initial state.
+        Resets the history to its initial state.
         """
-        self.memory = self.initial_memory.copy()
+        self.history = self.initial_history.copy()

     @property
     def input_schema(self) -> Type[BaseIOSchema]:
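
The renamed `reset_history` behaves exactly like the old `reset_memory`: it restores the copy taken in `__init__`. A hypothetical sketch, reusing the `agent` built above; the `chat_message` field name on `BaseAgentInputSchema` is an assumption, not shown in this diff:

```python
# Record a turn, then roll the conversation back to its initial state.
agent.history.add_message("user", BaseAgentInputSchema(chat_message="Hello!"))  # field name assumed
agent.reset_history()  # was: agent.reset_memory()

# After the reset, agent.history is a fresh copy of agent.initial_history.
assert agent.history.get_history() == agent.initial_history.get_history()
```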
@@ -145,14 +145,14 @@ def _prepare_messages(self):
             }
         ]

-        self.messages += self.memory.get_history()
+        self.messages += self.history.get_history()

     def run(self, user_input: Optional[InputSchema] = None) -> OutputSchema:
         """
         Runs the chat agent with the given user input synchronously.

         Args:
-            user_input (Optional[InputSchema]): The input from the user. If not provided, skips adding to memory.
+            user_input (Optional[InputSchema]): The input from the user. If not provided, skips adding to history.

         Returns:
             OutputSchema: The response from the chat agent.
@@ -161,9 +161,9 @@ def run(self, user_input: Optional[InputSchema] = None) -> OutputSchema:
             self.client, instructor.client.AsyncInstructor
         ), "The run method is not supported for async clients. Use run_async instead."
         if user_input:
-            self.memory.initialize_turn()
+            self.history.initialize_turn()
             self.current_user_input = user_input
-            self.memory.add_message("user", user_input)
+            self.history.add_message("user", user_input)

         self._prepare_messages()
         response = self.client.chat.completions.create(
@@ -172,7 +172,7 @@ def run(self, user_input: Optional[InputSchema] = None) -> OutputSchema:
             response_model=self.output_schema,
             **self.model_api_parameters,
         )
-        self.memory.add_message("assistant", response)
+        self.history.add_message("assistant", response)

         return response
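
The synchronous entry point is unchanged apart from where turns are recorded: the user message and the validated assistant response both land on `agent.history`. A short sketch under the same assumptions as above:

```python
# One synchronous turn; both sides of the exchange are stored on agent.history.
response = agent.run(BaseAgentInputSchema(chat_message="What does this rename change?"))
print(response.chat_message)             # field name assumed from the default output schema
print(len(agent.history.get_history()))  # history now includes the user and assistant turns
```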
@@ -181,7 +181,7 @@ def run_stream(self, user_input: Optional[InputSchema] = None) -> Generator[Outp
         Runs the chat agent with the given user input, supporting streaming output.

         Args:
-            user_input (Optional[InputSchema]): The input from the user. If not provided, skips adding to memory.
+            user_input (Optional[InputSchema]): The input from the user. If not provided, skips adding to history.

         Yields:
             OutputSchema: Partial responses from the chat agent.
@@ -193,9 +193,9 @@ def run_stream(self, user_input: Optional[InputSchema] = None) -> Generator[Outp
             self.client, instructor.client.AsyncInstructor
         ), "The run_stream method is not supported for async clients. Use run_async instead."
         if user_input:
-            self.memory.initialize_turn()
+            self.history.initialize_turn()
             self.current_user_input = user_input
-            self.memory.add_message("user", user_input)
+            self.history.add_message("user", user_input)

         self._prepare_messages()

@@ -211,7 +211,7 @@ def run_stream(self, user_input: Optional[InputSchema] = None) -> Generator[Outp
             yield partial_response

         full_response_content = self.output_schema(**partial_response.model_dump())
-        self.memory.add_message("assistant", full_response_content)
+        self.history.add_message("assistant", full_response_content)

         return full_response_content
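
Note that `run_stream` only writes the fully assembled response to history; the partials are yielded to the caller but never stored. A hypothetical streaming loop:

```python
# Partial objects are progressively more complete instances of the output schema;
# only the final assembled response is added to agent.history.
for partial in agent.run_stream(BaseAgentInputSchema(chat_message="Stream this, please.")):
    print(partial)
```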
@@ -220,7 +220,7 @@ async def run_async(self, user_input: Optional[InputSchema] = None) -> OutputSch
         Runs the chat agent asynchronously with the given user input.

         Args:
-            user_input (Optional[InputSchema]): The input from the user. If not provided, skips adding to memory.
+            user_input (Optional[InputSchema]): The input from the user. If not provided, skips adding to history.

         Returns:
             OutputSchema: The response from the chat agent.
@@ -231,34 +231,34 @@ async def run_async(self, user_input: Optional[InputSchema] = None) -> OutputSch
         """
         assert isinstance(self.client, instructor.client.AsyncInstructor), "The run_async method is for async clients."
         if user_input:
-            self.memory.initialize_turn()
+            self.history.initialize_turn()
             self.current_user_input = user_input
-            self.memory.add_message("user", user_input)
+            self.history.add_message("user", user_input)

         self._prepare_messages()

         response = await self.client.chat.completions.create(
             model=self.model, messages=self.messages, response_model=self.output_schema, **self.model_api_parameters
         )

-        self.memory.add_message("assistant", response)
+        self.history.add_message("assistant", response)
         return response

     async def run_async_stream(self, user_input: Optional[InputSchema] = None) -> AsyncGenerator[OutputSchema, None]:
         """
         Runs the chat agent asynchronously with the given user input, supporting streaming output.

         Args:
-            user_input (Optional[InputSchema]): The input from the user. If not provided, skips adding to memory.
+            user_input (Optional[InputSchema]): The input from the user. If not provided, skips adding to history.

         Yields:
             OutputSchema: Partial responses from the chat agent.
         """
         assert isinstance(self.client, instructor.client.AsyncInstructor), "The run_async method is for async clients."
         if user_input:
-            self.memory.initialize_turn()
+            self.history.initialize_turn()
             self.current_user_input = user_input
-            self.memory.add_message("user", user_input)
+            self.history.add_message("user", user_input)

         self._prepare_messages()

@@ -277,7 +277,7 @@ async def run_async_stream(self, user_input: Optional[InputSchema] = None) -> As

         if last_response:
             full_response_content = self.output_schema(**last_response.model_dump())
-            self.memory.add_message("assistant", full_response_content)
+            self.history.add_message("assistant", full_response_content)

     def get_context_provider(self, provider_name: str) -> Type[SystemPromptContextProviderBase]:
         """
@@ -365,7 +365,7 @@ def _create_config_table(agent: BaseAgent) -> Table:
     info_table.add_column("Value", style="yellow")

     info_table.add_row("Model", agent.model)
-    info_table.add_row("Memory", str(type(agent.memory).__name__))
+    info_table.add_row("History", str(type(agent.history).__name__))
     info_table.add_row("System Prompt Generator", str(type(agent.system_prompt_generator).__name__))

     return info_table
