import os
from flask import Flask, request, render_template
from openai import OpenAI

# The OpenAI client reads the API key from the OPENAI_API_KEY environment variable.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
server = Flask(__name__)
system_prompt = "You are the personal assistant of Kangdong!"


def send_gpt(prompt):
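    """Send a single user prompt to the Chat Completions API and return the reply text."""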
    try:
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": prompt}],
            temperature=0,
        )
        # The reply text is in the first choice of the response.
        response_dict = response.model_dump()
        response_message = response_dict["choices"][0]["message"]["content"]
        return response_message
    except Exception as e:
        # Return the error as text so the page can display it instead of crashing.
        return f"Error: {e}"


@server.route('/', methods=['GET', 'POST'])
def get_request_json():
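    """Render the chat page; on POST, send the question to the model and show the answer."""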
    if request.method == 'POST':
        question = request.form.get('question', '').strip()
        if not question:
            return render_template(
                'chat3.5.html', question="NULL", res="Question can't be empty!")
        print("======================================")
        print("Q:\n", question)
        res = send_gpt(question)
        print("A:\n", res)

        return render_template('chat3.5.html', question=question, res=str(res))
    return render_template('chat3.5.html', question=0)


if __name__ == '__main__':
    # 0.0.0.0 listens on all interfaces; debug=True is intended for local development only.
    server.run(debug=True, host='0.0.0.0', port=5000)
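# To try this locally (assuming chat3.5.html lives in Flask's default ./templates folder
# and OPENAI_API_KEY is set), run this file with Python and open http://localhost:5000.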