@@ -141,12 +141,12 @@ def answer_single_question(example, model, answers_file, action_type):
     try:
         if action_type == "vanilla":
             answer = agent([{"role": "user", "content": augmented_question}]).content
-            token_count = agent.last_output_token_count
+            token_counts = agent.monitor.get_total_token_counts()
             intermediate_steps = answer
         else:
             # Run agent 🚀
             answer = str(agent.run(augmented_question))
-            token_count = agent.monitor.get_total_token_counts()
+            token_counts = agent.monitor.get_total_token_counts()
             # Remove memory from logs to make them more compact.
             for step in agent.memory.steps:
                 if isinstance(step, ActionStep):
@@ -157,6 +157,8 @@ def answer_single_question(example, model, answers_file, action_type):
     except Exception as e:
         print("Error on ", augmented_question, e)
         intermediate_steps = []
+        token_counts = {"input": 0, "output": 0}
+        answer = str(e)
     end_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     annotated_example = {
         "model_id": model.model_id,
@@ -169,7 +171,7 @@ def answer_single_question(example, model, answers_file, action_type):
169171 "intermediate_steps" : intermediate_steps ,
170172 "start_time" : start_time ,
171173 "end_time" : end_time ,
172- "token_counts" : token_count ,
174+ "token_counts" : token_counts ,
173175 }
174176 append_answer (annotated_example , answers_file )
175177
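For context, a minimal sketch of the error-handling pattern these hunks introduce, assuming agent.monitor.get_total_token_counts() returns a dict with "input" and "output" keys (which the zeroed fallback implies); the run_and_count helper is hypothetical and only mirrors the try/except flow above:

    def run_and_count(agent, question):
        # Hypothetical helper mirroring the try/except flow in the diff.
        try:
            answer = str(agent.run(question))
            # Assumed to return {"input": <int>, "output": <int>}.
            token_counts = agent.monitor.get_total_token_counts()
        except Exception as e:
            # On failure, record the exception text as the answer and zero the
            # counts, so every record appended to the answers file has the same shape.
            answer = str(e)
            token_counts = {"input": 0, "output": 0}
        return answer, token_counts
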
@@ -233,7 +235,7 @@ def answer_questions(
         max_completion_tokens=8192,
     )
 else:
-    model = HfApiModel(model_id=args.model_id, provider="together", max_tokens=8192)
+    model = HfApiModel(model_id=args.model_id, max_tokens=8192)

 answer_questions(
     eval_ds,