server.py
import openai
from flask import Flask, send_from_directory, request, jsonify, Response, stream_with_context
from groq import Groq

app = Flask(__name__, static_folder="static")

# Set your OpenAI API key
openai.api_key = "<open_ai_key>"

# Groq API key
GROQ_API_KEY = "<groq_key>"
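
# Hardcoded placeholder keys are fine for local experiments; a common
# alternative (an assumption, not part of the original file) is to read
# the keys from environment variables instead:
#
#   import os
#   openai.api_key = os.environ.get("OPENAI_API_KEY", openai.api_key)
#   GROQ_API_KEY = os.environ.get("GROQ_API_KEY", GROQ_API_KEY)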

# Serve the index.html from the static folder
@app.route("/")
def serve_index():
    return send_from_directory("static", "index.html")


# Serve other static files (CSS, JS, etc.)
@app.route("/<path:path>")
def serve_static_files(path):
    return send_from_directory("static", path)

# KG Graph API Endpoint
@app.route("/kg-graph", methods=["GET"])
def kg_graph():
    # Simulated KG Graph text
    response = {"kg_graph_text": "This is the simulated KG Graph output."}
    return jsonify(response)

# API Endpoint: ChatGPT
@app.route("/chatgpt", methods=["POST"])
def chatgpt():
    try:
        # Parse the incoming request
        data = request.json
        question = data.get("question", "")
        if not question:
            return jsonify({"error": "Question is required"}), 400

        # Send the question to OpenAI's ChatCompletion API
        # (note: this interface requires the pre-1.0 `openai` package)
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",  # Use "gpt-4" if available
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": question},
            ],
        )

        # Extract the reply from OpenAI's response
        answer = response["choices"][0]["message"]["content"]
        return jsonify({"response": answer})
    except Exception as e:
        return jsonify({"error": str(e)}), 500
@app.route("/groq", methods=["POST"])
def groq():
try:
data = request.json
question = data.get("question", "")
if not question:
return jsonify({"error": "Question is required"}), 400
# Initialize the Groq client
client = Groq()
# Create the Groq completion request
completion = client.chat.completions.create(
model="llama3-groq-70b-8192-tool-use-preview",
messages=[{"role": "user", "content": question}],
temperature=0.5,
max_tokens=300,
top_p=0.65,
stream=True,
stop=None,
)
# Collect chunks into a single response
response_text = ""
for chunk in completion:
response_text += chunk.choices[0].delta.content or ""
# Return the accumulated response as JSON
return jsonify({"response": response_text})
except Exception as e:
return jsonify({"error": str(e)}), 500
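
# Response and stream_with_context are imported above but unused: the /groq
# route buffers the whole stream before replying. The sketch below is one
# possible way to forward Groq's stream to the client incrementally; the
# /groq-stream route name is illustrative, not part of the original app.
@app.route("/groq-stream", methods=["POST"])
def groq_stream():
    data = request.json
    question = data.get("question", "")
    if not question:
        return jsonify({"error": "Question is required"}), 400

    client = Groq(api_key=GROQ_API_KEY)
    completion = client.chat.completions.create(
        model="llama3-groq-70b-8192-tool-use-preview",
        messages=[{"role": "user", "content": question}],
        temperature=0.5,
        max_tokens=300,
        top_p=0.65,
        stream=True,
        stop=None,
    )

    def generate():
        # Yield each text delta as it arrives so the client can render the
        # answer word by word instead of waiting for the full reply.
        for chunk in completion:
            yield chunk.choices[0].delta.content or ""

    return Response(stream_with_context(generate()), mimetype="text/plain")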

# API Endpoint: Custom Response
@app.route("/custom-response", methods=["POST"])
def custom_response():
    data = request.json
    question = data.get("question", "")

    # Simulate a longer custom response
    response_text = (
        f"Response: {question}. "
        "This is a simulated custom hardcoded response. "
        "It is designed to demonstrate word-by-word rendering in the application. "
        "This response is deliberately made longer so that the behaviour can be fully appreciated."
    )
    response = {"response": response_text}
    return jsonify(response)

if __name__ == "__main__":
    app.run(port=8001, debug=True)
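
# A quick way to exercise the endpoints once the server is running
# (a hypothetical smoke test, not part of the original file):
#
#   import requests
#   r = requests.post("http://localhost:8001/custom-response",
#                     json={"question": "Hello"})
#   print(r.json()["response"])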