
Commit 0c359e1

chore: bump v0.11.3 (#2760)
2 parents: 102f3f0 + d4369f6

33 files changed: +400 -236 lines

.github/scripts/model-sweep/model_sweep.py

Lines changed: 1 addition & 1 deletion
@@ -96,7 +96,7 @@ def roll_dice(num_sides: int) -> int:
     "openai-gpt-4o-mini.json",
     # "azure-gpt-4o-mini.json",  # TODO: Re-enable on new agent loop
     "claude-3-5-sonnet.json",
-    "claude-3-7-sonnet.json",
+    "claude-4-sonnet-extended.json",
     "claude-3-7-sonnet-extended.json",
     "gemini-1.5-pro.json",
     "gemini-2.5-flash-vertex.json",

.github/workflows/send-message-integration-tests.yaml

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@ jobs:
 - "openai-gpt-4o-mini.json"
 - "azure-gpt-4o-mini.json"
 - "claude-3-5-sonnet.json"
-- "claude-3-7-sonnet.json"
+- "claude-4-sonnet-extended.json"
 - "claude-3-7-sonnet-extended.json"
 - "gemini-pro.json"
 - "gemini-vertex.json"

letta/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
     __version__ = version("letta")
 except PackageNotFoundError:
     # Fallback for development installations
-    __version__ = "0.11.2"
+    __version__ = "0.11.3"
 
 if os.environ.get("LETTA_VERSION"):
     __version__ = os.environ["LETTA_VERSION"]
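For reference, the pattern this hunk bumps is the usual importlib.metadata lookup with a hard-coded fallback for source checkouts, plus an environment-variable override. A minimal, self-contained sketch of that resolution order, with the distribution and env-var names swapped for hypothetical stand-ins:

```python
# Sketch only: "my-package" and MY_PACKAGE_VERSION are hypothetical
# stand-ins for "letta" and LETTA_VERSION.
import os
from importlib.metadata import PackageNotFoundError, version

try:
    # Installed package: read the version from the distribution metadata
    __version__ = version("my-package")
except PackageNotFoundError:
    # Fallback for development installs with no metadata (the literal
    # this commit bumps from 0.11.2 to 0.11.3)
    __version__ = "0.11.3"

# An explicit env var overrides both paths
if os.environ.get("MY_PACKAGE_VERSION"):
    __version__ = os.environ["MY_PACKAGE_VERSION"]
```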

letta/agents/letta_agent.py

Lines changed: 22 additions & 10 deletions
@@ -220,6 +220,7 @@ async def step_stream_no_tokens(
             actor=self.actor,
         )
         stop_reason = None
+        job_update_metadata = None
         usage = LettaUsageStatistics()
 
         # span for request
@@ -367,6 +368,7 @@ async def step_stream_no_tokens(
         except Exception as e:
             # Handle any unexpected errors during step processing
             self.logger.error(f"Error during step processing: {e}")
+            job_update_metadata = {"error": str(e)}
 
             # This indicates we failed after we decided to stop stepping, which indicates a bug with our flow.
             if not stop_reason:
@@ -429,7 +431,7 @@ async def step_stream_no_tokens(
                 self.logger.error("Invalid StepProgression value")
 
             if settings.track_stop_reason:
-                await self._log_request(request_start_timestamp_ns, request_span)
+                await self._log_request(request_start_timestamp_ns, request_span, job_update_metadata, is_error=True)
 
         except Exception as e:
             self.logger.error("Failed to update step: %s", e)
@@ -447,7 +449,7 @@ async def step_stream_no_tokens(
                 force=False,
             )
 
-        await self._log_request(request_start_timestamp_ns, request_span)
+        await self._log_request(request_start_timestamp_ns, request_span, job_update_metadata, is_error=False)
 
         # Return back usage
         for finish_chunk in self.get_finish_chunks_for_stream(usage, stop_reason):
@@ -485,6 +487,7 @@ async def _step(
         request_span.set_attributes({f"llm_config.{k}": v for k, v in agent_state.llm_config.model_dump().items() if v is not None})
 
         stop_reason = None
+        job_update_metadata = None
         usage = LettaUsageStatistics()
         for i in range(max_steps):
             # If dry run, build request data and return it without making LLM call
@@ -622,6 +625,7 @@ async def _step(
         except Exception as e:
             # Handle any unexpected errors during step processing
             self.logger.error(f"Error during step processing: {e}")
+            job_update_metadata = {"error": str(e)}
 
             # This indicates we failed after we decided to stop stepping, which indicates a bug with our flow.
             if not stop_reason:
@@ -680,7 +684,7 @@ async def _step(
                 self.logger.error("Invalid StepProgression value")
 
             if settings.track_stop_reason:
-                await self._log_request(request_start_timestamp_ns, request_span)
+                await self._log_request(request_start_timestamp_ns, request_span, job_update_metadata, is_error=True)
 
         except Exception as e:
             self.logger.error("Failed to update step: %s", e)
@@ -698,7 +702,7 @@ async def _step(
                 force=False,
             )
 
-        await self._log_request(request_start_timestamp_ns, request_span)
+        await self._log_request(request_start_timestamp_ns, request_span, job_update_metadata, is_error=False)
 
         return current_in_context_messages, new_in_context_messages, stop_reason, usage
 
@@ -748,6 +752,7 @@ async def step_stream(
             actor=self.actor,
         )
         stop_reason = None
+        job_update_metadata = None
         usage = LettaUsageStatistics()
         first_chunk, request_span = True, None
         if request_start_timestamp_ns:
@@ -977,6 +982,7 @@ async def step_stream(
         except Exception as e:
             # Handle any unexpected errors during step processing
             self.logger.error(f"Error during step processing: {e}")
+            job_update_metadata = {"error": str(e)}
 
             # This indicates we failed after we decided to stop stepping, which indicates a bug with our flow.
             if not stop_reason:
@@ -1039,7 +1045,7 @@ async def step_stream(
 
             # Do tracking for failure cases. Can consolidate with success conditions later.
             if settings.track_stop_reason:
-                await self._log_request(request_start_timestamp_ns, request_span)
+                await self._log_request(request_start_timestamp_ns, request_span, job_update_metadata, is_error=True)
 
         except Exception as e:
             self.logger.error("Failed to update step: %s", e)
@@ -1056,20 +1062,28 @@ async def step_stream(
                 force=False,
             )
 
-        await self._log_request(request_start_timestamp_ns, request_span)
+        await self._log_request(request_start_timestamp_ns, request_span, job_update_metadata, is_error=False)
 
         for finish_chunk in self.get_finish_chunks_for_stream(usage, stop_reason):
             yield f"data: {finish_chunk}\n\n"
 
-    async def _log_request(self, request_start_timestamp_ns: int, request_span: "Span | None"):
+    async def _log_request(
+        self, request_start_timestamp_ns: int, request_span: "Span | None", job_update_metadata: dict | None, is_error: bool
+    ):
         if request_start_timestamp_ns:
             now_ns, now = get_utc_timestamp_ns(), get_utc_time()
             duration_ns = now_ns - request_start_timestamp_ns
             if request_span:
                 request_span.add_event(name="letta_request_ms", attributes={"duration_ms": ns_to_ms(duration_ns)})
             await self._update_agent_last_run_metrics(now, ns_to_ms(duration_ns))
-            if self.current_run_id:
+            if settings.track_agent_run and self.current_run_id:
                 await self.job_manager.record_response_duration(self.current_run_id, duration_ns, self.actor)
+                await self.job_manager.safe_update_job_status_async(
+                    job_id=self.current_run_id,
+                    new_status=JobStatus.failed if is_error else JobStatus.completed,
+                    actor=self.actor,
+                    metadata=job_update_metadata,
+                )
         if request_span:
             request_span.end()
 
@@ -1507,8 +1521,6 @@ async def _execute_tool(
                 status="error",
             )
 
-        print(target_tool)
-
         # TODO: This temp. Move this logic and code to executors
 
         if agent_step_span:
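The recurring change across these hunks: each step loop now captures any step-processing error into `job_update_metadata`, and `_log_request` takes that metadata plus an `is_error` flag so it can close out the tracked run with a terminal job status (`failed` or `completed`), gated behind `settings.track_agent_run`. Below is a minimal, self-contained sketch of that pattern; `JobStatus` and `StubJobManager` are simplified stand-ins that mimic only the pieces used in this diff, not Letta's actual classes:

```python
import asyncio
from enum import Enum


class JobStatus(str, Enum):  # stand-in for Letta's JobStatus
    completed = "completed"
    failed = "failed"


class StubJobManager:
    """Hypothetical stub mimicking only the call used in this diff."""

    async def safe_update_job_status_async(self, job_id, new_status, metadata=None):
        print(f"job {job_id}: {new_status.value}, metadata={metadata}")


async def run_with_job_tracking(job_manager, run_id, step):
    # Mirrors the diff: the error is captured as job metadata, and the job
    # is finalized exactly once with completed/failed plus that metadata.
    job_update_metadata = None
    is_error = False
    try:
        await step()
    except Exception as e:
        job_update_metadata = {"error": str(e)}
        is_error = True
    await job_manager.safe_update_job_status_async(
        job_id=run_id,
        new_status=JobStatus.failed if is_error else JobStatus.completed,
        metadata=job_update_metadata,
    )


async def main():
    jm = StubJobManager()

    async def ok_step():
        pass

    async def bad_step():
        raise RuntimeError("tool execution failed")

    await run_with_job_tracking(jm, "run-1", ok_step)   # -> completed, metadata=None
    await run_with_job_tracking(jm, "run-2", bad_step)  # -> failed, metadata={'error': ...}


asyncio.run(main())
```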

letta/constants.py

Lines changed: 7 additions & 0 deletions
@@ -208,6 +208,13 @@ def FUNCTION_RETURN_VALUE_TRUNCATED(return_str, return_char: int, return_char_li
     "deepseek-chat": 64000,
     "deepseek-reasoner": 64000,
     ## OpenAI models: https://platform.openai.com/docs/models/overview
+    # gpt-5
+    "gpt-5": 400000,
+    "gpt-5-2025-08-07": 400000,
+    "gpt-5-mini": 400000,
+    "gpt-5-mini-2025-08-07": 400000,
+    "gpt-5-nano": 400000,
+    "gpt-5-nano-2025-08-07": 400000,
     # reasoners
     "o1": 200000,
     # "o1-pro": 200000,  # responses API only

letta/functions/function_sets/base.py

Lines changed: 1 addition & 1 deletion
@@ -264,7 +264,7 @@ def memory_insert(agent_state: "AgentState", label: str, new_str: str, insert_li
 
     Args:
         label (str): Section of the memory to be edited, identified by its label.
-        new_str (str): The text to insert.
+        new_str (str): The text to insert. Do not include line number prefixes.
         insert_line (int): The line number after which to insert the text (0 for beginning of file). Defaults to -1 (end of the file).
 
     Returns:
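The docstring tweak matters because memory tools often display content with line-number prefixes, and echoing those prefixes back into `new_str` corrupts the inserted text. As a standalone illustration of the `insert_line` semantics the docstring describes (0 for the beginning, the default -1 for the end, otherwise insert after that line), here is a hypothetical helper, not Letta's implementation:

```python
def insert_text(block: str, new_str: str, insert_line: int = -1) -> str:
    """Insert new_str into block; 0 = beginning, -1 = append at end."""
    lines = block.split("\n")
    if insert_line == -1:
        lines.append(new_str)
    else:
        lines.insert(insert_line, new_str)
    return "\n".join(lines)


memory = "alpha\nbeta"
print(insert_text(memory, "zero", insert_line=0))     # before "alpha"
print(insert_text(memory, "gamma"))                   # appended at the end
print(insert_text(memory, "alpha.5", insert_line=1))  # after line 1 ("alpha")
```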

letta/helpers/converters.py

Lines changed: 19 additions & 0 deletions
@@ -395,6 +395,24 @@ def deserialize_agent_step_state(data: Optional[Dict]) -> Optional[AgentStepStat
     if not data:
         return None
 
+    if solver_data := data.get("tool_rules_solver"):
+        # Get existing tool_rules or reconstruct from categorized fields for backwards compatibility
+        tool_rules_data = solver_data.get("tool_rules", [])
+
+        if not tool_rules_data:
+            for field_name in (
+                "init_tool_rules",
+                "continue_tool_rules",
+                "child_based_tool_rules",
+                "parent_tool_rules",
+                "terminal_tool_rules",
+                "required_before_exit_tool_rules",
+            ):
+                if field_data := solver_data.get(field_name):
+                    tool_rules_data.extend(field_data)
+
+        solver_data["tool_rules"] = deserialize_tool_rules(tool_rules_data)
+
     return AgentStepState(**data)
 
 
@@ -418,6 +436,7 @@ def deserialize_response_format(data: Optional[Dict]) -> Optional[ResponseFormat
         return JsonSchemaResponseFormat(**data)
     if data["type"] == ResponseFormatType.json_object:
         return JsonObjectResponseFormat(**data)
+    raise ValueError(f"Unknown Response Format type: {data['type']}")
 
 
 # --------------------------
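The first hunk is a backwards-compatibility shim: older serialized solvers stored rules in per-category lists, newer ones use a single flat `tool_rules` list, and the deserializer now merges the old fields into the new shape before validating. The second hunk makes an unrecognized response-format type fail loudly instead of silently returning None. A self-contained sketch of the merge logic on plain dicts; the rule payloads are made up for illustration:

```python
CATEGORY_FIELDS = (
    "init_tool_rules",
    "continue_tool_rules",
    "child_based_tool_rules",
    "parent_tool_rules",
    "terminal_tool_rules",
    "required_before_exit_tool_rules",
)


def collect_tool_rules(solver_data: dict) -> list:
    """Prefer the flat tool_rules list; otherwise rebuild it from the
    legacy categorized fields, in a fixed order."""
    tool_rules = solver_data.get("tool_rules", [])
    if not tool_rules:
        for field in CATEGORY_FIELDS:
            tool_rules.extend(solver_data.get(field) or [])
    return tool_rules


# Hypothetical legacy payload with no flat "tool_rules" list:
legacy = {
    "init_tool_rules": [{"tool_name": "first_tool"}],
    "terminal_tool_rules": [{"tool_name": "send_message"}],
}
print(collect_tool_rules(legacy))
# [{'tool_name': 'first_tool'}, {'tool_name': 'send_message'}]
```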

letta/helpers/json_helpers.py

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ def safe_serializer(obj):
         try:
             return obj.decode("utf-8")
         except Exception:
-            print(f"Error decoding bytes as utf-8: {obj}")
+            # TODO: this is to handle Gemini thought signatures, b64 decode this back to bytes when sending back to Gemini
             return base64.b64encode(obj).decode("utf-8")
     raise TypeError(f"Type {type(obj)} not serializable")
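The replaced print fired on every non-UTF-8 byte string; the new comment documents why the base64 path exists (Gemini thought signatures arrive as raw bytes). A runnable sketch of the same serializer shape used as a `json.dumps` default; the surrounding `isinstance` check is assumed from context:

```python
import base64
import json


def safe_serializer(obj):
    # Bytes decode as UTF-8 when possible; otherwise (e.g. Gemini thought
    # signatures) fall back to base64 so json.dumps does not raise.
    if isinstance(obj, bytes):
        try:
            return obj.decode("utf-8")
        except Exception:
            return base64.b64encode(obj).decode("utf-8")
    raise TypeError(f"Type {type(obj)} not serializable")


payload = {"text": b"plain ascii", "signature": b"\x9f\x8e\xff"}
print(json.dumps(payload, default=safe_serializer))
# {"text": "plain ascii", "signature": "n47/"}
```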
