
400 Input tokens exceed the configured limit #721

@jinyangxu

Description


```
2025-11-28 11:20:18,916 - src.graph.nodes - ERROR - Full traceback:
Traceback (most recent call last):
  File "d:\code\test_code\deer-flow\src\graph\nodes.py", line 978, in execute_agent_step
    result = await agent.ainvoke(
             ^^^^^^^^^^^^^^^^^^^^
  File "D:\code\test_code\deer-flow\.venv\Lib\site-packages\langgraph\pregel\__init__.py", line 2892, in ainvoke
    async for chunk in self.astream(
  File "D:\code\test_code\deer-flow\.venv\Lib\site-packages\langgraph\pregel\__init__.py", line 2759, in astream
    async for _ in runner.atick(
  File "D:\code\test_code\deer-flow\.venv\Lib\site-packages\langgraph\pregel\runner.py", line 283, in atick
    await arun_with_retry(
  File "D:\code\test_code\deer-flow\.venv\Lib\site-packages\langgraph\pregel\retry.py", line 128, in arun_with_retry
    return await task.proc.ainvoke(task.input, config)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\code\test_code\deer-flow\.venv\Lib\site-packages\langgraph\utils\runnable.py", line 672, in ainvoke
    input = await asyncio.create_task(
            ^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\code\test_code\deer-flow\.venv\Lib\site-packages\langgraph\utils\runnable.py", line 431, in ainvoke
    ret = await asyncio.create_task(coro, context=context)
          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\code\test_code\deer-flow\.venv\Lib\site-packages\langgraph\prebuilt\chat_agent_executor.py", line 763, in acall_model
    response = cast(AIMessage, await model_runnable.ainvoke(state, config))
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\code\test_code\deer-flow\.venv\Lib\site-packages\langchain_core\runnables\base.py", line 3088, in ainvoke
    input_ = await coro_with_context(part(), context, create_task=True)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\code\test_code\deer-flow\.venv\Lib\site-packages\langchain_core\runnables\base.py", line 5447, in ainvoke
    return await self.bound.ainvoke(
           ^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\code\test_code\deer-flow\.venv\Lib\site-packages\langchain_core\language_models\chat_models.py", line 417, in ainvoke
    llm_result = await self.agenerate_prompt(
                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\code\test_code\deer-flow\.venv\Lib\site-packages\langchain_core\language_models\chat_models.py", line 991, in agenerate_prompt
    return await self.agenerate(
           ^^^^^^^^^^^^^^^^^^^^^
  File "D:\code\test_code\deer-flow\.venv\Lib\site-packages\langchain_core\language_models\chat_models.py", line 949, in agenerate
    raise exceptions[0]
  File "D:\code\test_code\deer-flow\.venv\Lib\site-packages\langchain_core\language_models\chat_models.py", line 1117, in _agenerate_with_cache
    result = await self._agenerate(
             ^^^^^^^^^^^^^^^^^^^^^^
  File "D:\code\test_code\deer-flow\.venv\Lib\site-packages\langchain_openai\chat_models\base.py", line 1199, in _agenerate
    response = await self.async_client.create(**payload)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\code\test_code\deer-flow\.venv\Lib\site-packages\openai\resources\chat\completions\completions.py", line 2028, in create
    return await self._post(
           ^^^^^^^^^^^^^^^^^
  File "D:\code\test_code\deer-flow\.venv\Lib\site-packages\openai\_base_client.py", line 1748, in post
    return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "D:\code\test_code\deer-flow\.venv\Lib\site-packages\openai\_base_client.py", line 1555, in request
    raise self._make_status_error_from_response(err.response) from None
openai.BadRequestError: Error code: 400 - {'message': '400 Input tokens exceed the configured limit of 272000 tokens. Your messages resulted in 339549 tokens. Please reduce the length of the messages.'}
During task with name 'agent' and id '888e5c19-0768-9651-fcab-6eeee8a9d756'
```
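
The request fails because the accumulated agent message history (339,549 tokens) exceeds the provider's 272,000-token input limit, so the error surfaces from the API rather than from deer-flow itself. A common mitigation is to trim the message history before each model call. Below is a minimal sketch using langchain-core's `trim_messages`; the 260,000-token budget and the model name are placeholder assumptions for illustration, not deer-flow's actual configuration.

```python
# Minimal sketch: keep the prompt under the provider's 272k input limit by
# trimming the oldest messages before each model call. The budget and model
# below are assumptions for illustration only.
from langchain_core.messages import trim_messages
from langchain_openai import ChatOpenAI

model = ChatOpenAI(model="gpt-4o")  # placeholder; deer-flow configures its own model


def clamp_history(messages):
    """Keep the system prompt plus the most recent messages that fit the budget."""
    return trim_messages(
        messages,
        max_tokens=260_000,   # leave headroom under the 272_000-token limit
        strategy="last",      # drop the oldest messages first
        token_counter=model,  # count tokens via the model's tokenizer
        include_system=True,  # never drop the system prompt
        start_on="human",     # keep the trimmed history well-formed
    )
```

Since the traceback passes through `langgraph.prebuilt.chat_agent_executor`, the agent appears to be built with `create_react_agent`; recent langgraph releases accept a `pre_model_hook` where a function like this could run before every model call, though whether the installed version exposes that hook would need to be checked.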
