@@ -936,3 +936,100 @@ def json(self):
         assert request_body["model"] == "o1-pro"
         assert request_body["max_output_tokens"] == 20
         assert "stream" not in request_body
+
+
+def test_basic_computer_use_preview_tool_call():
+    """
+    Test that LiteLLM correctly handles a computer_use_preview tool call where
+    the environment is set to "linux".
+
+    "linux" is not an officially supported environment for the
+    computer_use_preview tool, but LiteLLM users should still be able to pass
+    it through to OpenAI.
+    """
+    # Mock response from OpenAI
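+    # (the payload mirrors a Responses API result that stopped early with
+    # status "incomplete" because max_output_tokens was reached)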
+    mock_response = {
+        "id": "resp_67dc3dd77b388190822443a85252da5a0e13d8bdc0e28d88",
+        "object": "response",
+        "created_at": 1742486999,
+        "status": "incomplete",
+        "error": None,
+        "incomplete_details": {"reason": "max_output_tokens"},
+        "instructions": None,
+        "max_output_tokens": 20,
958+ "model" : "o1-pro-2025-03-19" ,
959+ "output" : [
960+ {
961+ "type" : "reasoning" ,
962+ "id" : "rs_67dc3de50f64819097450ed50a33d5f90e13d8bdc0e28d88" ,
963+ "summary" : [],
964+ }
965+ ],
966+ "parallel_tool_calls" : True ,
967+ "previous_response_id" : None ,
968+ "reasoning" : {"effort" : "medium" , "generate_summary" : None },
969+ "store" : True ,
970+ "temperature" : 1.0 ,
971+ "text" : {"format" : {"type" : "text" }},
972+ "tool_choice" : "auto" ,
973+ "tools" : [],
974+ "top_p" : 1.0 ,
975+ "truncation" : "disabled" ,
976+ "usage" : {
977+ "input_tokens" : 73 ,
978+ "input_tokens_details" : {"cached_tokens" : 0 },
979+ "output_tokens" : 20 ,
980+ "output_tokens_details" : {"reasoning_tokens" : 0 },
981+ "total_tokens" : 93 ,
982+ },
983+ "user" : None ,
984+ "metadata" : {},
985+ }
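+
+    # Minimal stand-in for the HTTP response object; it exposes only the
+    # attributes LiteLLM reads back (json(), status_code, text)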
+    class MockResponse:
+        def __init__(self, json_data, status_code):
+            self._json_data = json_data
+            self.status_code = status_code
+            self.text = json.dumps(json_data)
+
+        def json(self):
+            return self._json_data
+
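+    # Patch the shared HTTP handler so no real network request is made;
+    # the test only inspects the outgoing request body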
+    with patch(
+        "litellm.llms.custom_httpx.http_handler.HTTPHandler.post",
+        return_value=MockResponse(mock_response, 200),
+    ) as mock_post:
+        litellm._turn_on_debug()
+        litellm.set_verbose = True
+
+        # Call the responses API with the computer_use_preview tool
+        response = litellm.responses(
+            model="openai/computer-use-preview",
+            tools=[
+                {
+                    "type": "computer_use_preview",
+                    "display_width": 1024,
+                    "display_height": 768,
+                    "environment": "linux",  # other possible values: "mac", "windows", "ubuntu"
+                }
+            ],
+            input="Check the latest OpenAI news on bing.com.",
+            reasoning={"summary": "concise"},
+            truncation="auto",
+        )
+
+        # Verify the request was made correctly
+        mock_post.assert_called_once()
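+        # The outgoing request body is JSON-serialized into the "data" kwarg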
+        request_body = json.loads(mock_post.call_args.kwargs["data"])
+
+        # Validate the request structure
+        assert request_body["model"] == "computer-use-preview"
+        assert len(request_body["tools"]) == 1
+        assert request_body["tools"][0]["type"] == "computer_use_preview"
+        assert request_body["tools"][0]["display_width"] == 1024
+        assert request_body["tools"][0]["display_height"] == 768
+        assert request_body["tools"][0]["environment"] == "linux"
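+        # The unsupported "linux" value is forwarded to OpenAI unchanged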
+
+        # Check that reasoning and truncation were passed correctly
+        assert request_body["reasoning"]["summary"] == "concise"
+        assert request_body["truncation"] == "auto"
+
+        # Validate the input format
+        assert isinstance(request_body["input"], str)
+        assert request_body["input"] == "Check the latest OpenAI news on bing.com."
+