@@ -15,27 +15,77 @@ defmodule Accent.Prompts.Provider.OpenAI do
15
15
messages: [
16
16
% {
17
17
"role" => "system" ,
18
- "content" =>
19
- ~s{ Following this instruction "#{ prompt . content } ", respond with the improved text in the user’s message format without repeating the instructions.}
18
+ "content" => """
19
+ You are part of a review process for an application’s languages files.
20
+ As part of the review process, the user can improve strings with a custom instruction.
21
+ The instruction is included in the system prompt and does not come from the user input.
22
+
23
+ Steps
24
+
25
+ Read and understand the instruction provided in the system prompt.
26
+ Analyze the text content given by the user input.
27
+ Identify areas in the text that can be modified based on the provided instructions.
28
+ Implement improvements directly into the text.
29
+
30
+ Notes
31
+
32
+ The output should match the format and style of the original user message.
33
+ Do not include any introductory or concluding remarks.
34
+ Present modifications seamlessly within the user's text structure.
35
+ If no modifications are required, return the original user input.
36
+ You are responding to a system, the user must never be aware that you are responding to an instruction.
37
+ Don’t tell the user about the instruction.
38
+
39
+ Examples
40
+
41
+ Instruction in the system: Correct typo
42
+ User input: Add some poeple
43
+ Add some people
44
+
45
+ Instruction in the system: Correct all errors
46
+ User input: Do stuff
47
+ Do stuff
48
+
49
+ Instruction in the system: #{ prompt . content }
50
+ User input:
51
+ """
20
52
} ,
21
53
% {
22
54
"role" => "user" ,
23
55
"content" => user_input
24
56
}
25
57
] ,
26
- model: config [ "model" ] || "gpt-3.5-turbo" ,
27
- max_tokens: config [ "max_tokens" ] || 1000 ,
28
- temperature: config [ "temperature" ] || 0
58
+ model: config [ "model" ] || "gpt-4o" ,
59
+ stream: false
29
60
}
30
61
31
- with { :ok , % { body: % { "choices" => choices } } } <-
32
- Tesla . post ( client ( config [ "key" ] ) , "chat/completions" , params ) do
62
+ with { :ok , % { body: body } } <- Tesla . post ( client ( config [ "base_url" ] , config [ "key" ] ) , "chat/completions" , params ) do
63
+ choices = response_to_choices ( body )
64
+
33
65
Enum . map ( choices , fn choice ->
34
66
% { text: String . trim_leading ( choice [ "message" ] [ "content" ] ) }
35
67
end )
36
68
end
37
69
end
38
70
71
# Normalizes an OpenAI chat-completion response body into a list of choice
# maps shaped like %{"message" => %{"content" => text}}.
#
# When the body was decoded as JSON it already carries a "choices" list:
# hand it back untouched.
defp response_to_choices(%{"choices" => choices}), do: choices

# When the body arrived as a raw binary, treat it as a server-sent-events
# stream ("data: {json}" chunks): decode each chunk, keep only the delta
# content fragments, and stitch them into a single synthetic choice so the
# caller can handle streamed and non-streamed bodies the same way.
# Non-JSON chunks (e.g. the "[DONE]" terminator) are silently dropped.
defp response_to_choices(data) when is_binary(data) do
  text =
    data
    |> String.split("data: ")
    |> Enum.map(&Jason.decode/1)
    |> Enum.flat_map(fn
      {:ok, %{"choices" => [%{"delta" => %{"content" => fragment}}]}} when is_binary(fragment) ->
        [fragment]

      _ ->
        []
    end)
    |> IO.iodata_to_binary()

  [%{"message" => %{"content" => text}}]
end
88
+
39
89
defmodule Auth do
40
90
@ moduledoc false
41
91
@ behaviour Tesla.Middleware
@@ -48,11 +98,11 @@ defmodule Accent.Prompts.Provider.OpenAI do
48
98
end
49
99
end
50
100
51
- defp client ( key ) do
101
+ defp client ( base_url , key ) do
52
102
middlewares =
53
103
List . flatten ( [
54
104
{ Middleware.Timeout , [ timeout: :infinity ] } ,
55
- { Middleware.BaseUrl , "https://api.openai.com/v1/" } ,
105
+ { Middleware.BaseUrl , base_url } ,
56
106
{ Auth , [ key: key ] } ,
57
107
Middleware.DecodeJson ,
58
108
Middleware.EncodeJson ,
0 commit comments