@@ -155,9 +155,9 @@ async def _initialize_engines(self):
             request_logger=None,
             chat_template=self.tokenizer.tokenizer.chat_template,
             chat_template_content_format="auto",
-            enable_reasoning=os.getenv('ENABLE_REASONING', 'false').lower() == 'true',
-            reasoning_parser=None,
-            return_token_as_token_ids=False,
+            # enable_reasoning=os.getenv('ENABLE_REASONING', 'false').lower() == 'true',
+            # reasoning_parser=None,
+            # return_token_as_token_ids=False,
             enable_auto_tools=os.getenv('ENABLE_AUTO_TOOL_CHOICE', 'false').lower() == 'true',
             tool_parser=os.getenv('TOOL_CALL_PARSER', "") or None,
             enable_prompt_tokens_details=False
@@ -167,7 +167,7 @@ async def _initialize_engines(self):
             model_config=self.model_config,
             models=self.serving_models,
             request_logger=None,
-            return_token_as_token_ids=False,
+            # return_token_as_token_ids=False,
         )

     async def generate(self, openai_request: JobInput):
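The change comments the keyword arguments out rather than deleting them. A minimal sketch of an alternative, assuming the arguments were dropped because the installed vLLM release no longer accepts them: filter optional keyword arguments against the target callable's signature, so the same call works whether or not a given parameter exists. The names filter_supported_kwargs and make_handler below are illustrative only; they are not part of this repository or of vLLM.

import inspect


def filter_supported_kwargs(fn, kwargs):
    """Keep only the keyword arguments that `fn` explicitly declares."""
    params = inspect.signature(fn).parameters
    # If fn accepts **kwargs, every name is "supported"; pass them all through.
    if any(p.kind is inspect.Parameter.VAR_KEYWORD for p in params.values()):
        return dict(kwargs)
    return {name: value for name, value in kwargs.items() if name in params}


# Stand-in for a constructor such as OpenAIServingChat; only the filtering
# logic above is the point of the sketch.
def make_handler(*, request_logger=None, enable_auto_tools=False, tool_parser=None):
    return {"auto_tools": enable_auto_tools, "tool_parser": tool_parser}


optional = {
    "enable_auto_tools": True,
    "tool_parser": "hermes",
    "enable_reasoning": True,  # silently dropped when the target does not accept it
}
handler = make_handler(**filter_supported_kwargs(make_handler, optional))
# handler == {"auto_tools": True, "tool_parser": "hermes"}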