-function gpt3(promt) {
+/**
+ * A function that uses the OpenAI GPT-3 API to generate a response to a prompt.
+ *
+ * @param {string} prompt - The prompt to generate a response to.
+ * @returns {string} The generated response.
+ */
+function gpt3(prompt) {
+  // Get the script cache.
   var cache = CacheService.getScriptCache();
-  var cache_str = cache.get('cache');
+
+  // Get the cached messages, or create an empty array of messages.
+  var cache_str = cache.get("cache");
   var messages = [];
   if (cache_str == null) {
-    messages = [{
-      "role": "user",
-      "content": promt
-    }];
+    messages = [
+      {
+        role: "user",
+        content: prompt
+      }
+    ];
   } else {
     messages = JSON.parse(cache_str);
     messages[messages.length] = {
-      "role": "user",
-      "content": promt
+      role: "user",
+      content: prompt
     };
   }
+
+  // Call the OpenAI API to generate a response.
   var response = callAPI(messages);
+
+  // Add the generated response to the messages array.
   messages[messages.length] = {
-    "role": "assistant",
-    "content": response
+    role: "assistant",
+    content: response
   };
+
+  // Remove old messages if the length of the messages array exceeds a certain threshold.
   while (JSON.stringify(messages).length > 100000) {
     var todelete = messages.shift();
   }
-  cache.put('cache', JSON.stringify(messages), 300);
+
+  // Cache the messages array.
+  cache.put("cache", JSON.stringify(messages), 300);
+
+  // Return the generated response.
   return response;
 }
 
+/**
+ * A function that calls the OpenAI API to generate a response to a set of messages.
+ *
+ * @param {Object[]} messages - An array of messages.
+ * @returns {string} The generated response.
+ */
 function callAPI(messages) {
+  // Construct the data object for the API call.
   var data = {
-    'model': 'gpt-3.5-turbo',
-    'messages': messages,
+    model: "gpt-3.5-turbo",
+    messages: messages
   };
+
+  // Set the options for the API call.
   var options = {
-    'method': 'post',
-    'contentType': 'application/json',
-    'payload': JSON.stringify(data),
-    'headers': {
-      Authorization: 'Bearer ' + CONFIG().openai_apikey,
+    method: "post",
+    contentType: "application/json",
+    payload: JSON.stringify(data),
+    headers: {
+      Authorization: "Bearer " + CONFIG().openai_apikey
     },
     muteHttpExceptions: true
   };
+
+  // Make the API call.
   var response = UrlFetchApp.fetch(
-    'https://api.openai.com/v1/chat/completions',
-    options,
+    "https://api.openai.com/v1/chat/completions",
+    options
   );
+
+  // Log the response for debugging purposes.
   Logger.log(response.getContentText());
+
+  // If the API call returned an error, return the error message.
   if (JSON.parse(response.getContentText()).error?.message != undefined) {
-    return JSON.parse(response.getContentText())['error']['message'];
+    return JSON.parse(response.getContentText())["error"]["message"];
   } else {
-    return JSON.parse(response.getContentText())['choices'][0]['message']['content']
+    // Otherwise, return the generated response.
+    return JSON.parse(response.getContentText())["choices"][0]["message"]["content"];
   }
 }
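The updated callAPI() still reads the API key from a CONFIG() helper that is not part of this diff. A minimal sketch of what such a helper could look like in Apps Script, assuming the key is stored in Script Properties under a hypothetical OPENAI_API_KEY property (both the property name and the exact shape of the returned object are assumptions, not taken from this commit):

/**
 * Hypothetical configuration helper (not part of this commit).
 * Assumes the key was saved to Script Properties, e.g. via
 * PropertiesService or the project settings UI.
 *
 * @returns {Object} Configuration values used by callAPI().
 */
function CONFIG() {
  var props = PropertiesService.getScriptProperties();
  return {
    // "OPENAI_API_KEY" is an assumed property name; use whatever key the project actually stores.
    openai_apikey: props.getProperty("OPENAI_API_KEY")
  };
}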
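One way to exercise the refactored function from the Apps Script editor is a small driver like the sketch below (illustrative only, not part of the commit). Because gpt3() caches the conversation for 300 seconds, repeated calls within that window continue the same conversation, and the first run will prompt for authorization since UrlFetchApp makes an external request.

/**
 * Example driver for manual testing from the Apps Script editor (illustrative only).
 */
function testGpt3() {
  // Hypothetical prompt; any string works.
  var reply = gpt3("Summarize the plot of Hamlet in one sentence.");
  Logger.log(reply);
}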