Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
@@ -1 +1,4 @@
frontend/node_modules
frontend/node_modules
Recommender/.idea
Recommender/venv
backend/venv
2 changes: 1 addition & 1 deletion .idea/codefest24.iml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

13 changes: 13 additions & 0 deletions .idea/inspectionProfiles/Project_Default.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 4 additions & 0 deletions .idea/misc.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

10 changes: 10 additions & 0 deletions backend/loadmodel.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
import tensorflow as tf
import numpy as np


# Smoke-test script: load the trained recommender network and run a single
# prediction on one hand-written feature vector, printing the raw output.
# (Earlier checkpoint lived at
#  ../Recommender/model1acc=0.6855000257492065/saved_model.)
model = tf.keras.models.load_model("model.keras")
# model.summary()

# One 28-element sample — presumably the per-question answer history the
# model was trained on; TODO confirm the encoding against the notebook.
sample = [1, 1, 1, 1, 0, 0, 0, 3, 2, 0, 2, 3, 3, 2, 1, 1, 2, 1, 2,
          0, 0, 0, 0, 0, 0, 0, 0, 0]

# predict() expects a batch, so wrap the single sample in a (1, 28) array.
pred = model.predict(np.array([sample]))
print(pred)
70 changes: 70 additions & 0 deletions backend/model.ipynb

Large diffs are not rendered by default.

Binary file added backend/model.keras
Binary file not shown.
56 changes: 56 additions & 0 deletions backend/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
absl-py==2.1.0
annotated-types==0.6.0
anyio==4.3.0
astunparse==1.6.3
blinker==1.7.0
certifi==2024.2.2
charset-normalizer==3.3.2
click==8.1.7
distro==1.9.0
exceptiongroup==1.2.1
Flask==3.0.3
Flask-Cors==4.0.0
flatbuffers==24.3.25
gast==0.5.4
google-pasta==0.2.0
grpcio==1.62.2
h11==0.14.0
h5py==3.11.0
httpcore==1.0.5
httpx==0.27.0
idna==3.7
itsdangerous==2.2.0
Jinja2==3.1.3
keras==3.2.1
libclang==18.1.1
Markdown==3.6
markdown-it-py==3.0.0
MarkupSafe==2.1.5
mdurl==0.1.2
ml-dtypes==0.3.2
namex==0.0.8
numpy==1.26.4
openai==1.23.2
opt-einsum==3.3.0
optree==0.11.0
packaging==24.0
protobuf==4.25.3
pydantic==2.7.0
pydantic_core==2.18.1
Pygments==2.17.2
python-dotenv==1.0.1
requests==2.31.0
rich==13.7.1
six==1.16.0
sniffio==1.3.1
tensorboard==2.16.2
tensorboard-data-server==0.7.2
tensorflow==2.16.1
tensorflow-io-gcs-filesystem==0.36.0
termcolor==2.4.0
timeout-decorator==0.5.0
tqdm==4.66.2
typing_extensions==4.11.0
urllib3==2.2.1
Werkzeug==3.0.2
wrapt==1.16.0
83 changes: 83 additions & 0 deletions backend/route.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
from flask import Flask, request
from flask_cors import CORS
import multiprocessing
import timeout_decorator
import tensorflow as tf
import random
import numpy as np
from openai import OpenAI
from dotenv import load_dotenv
import os

# Load the environment variables from the .env file
# (the LLM endpoints below read OPENAI_API from the environment).
load_dotenv()

# Flask application object; all routes in this module attach to it.
app = Flask(__name__)

# Enable cross-origin requests so the separate frontend can call this API
# (default flask-cors settings: all origins/routes).
CORS(app)


def execute_code(code):
    """Execute *code* as a top-level script; return the error text, if any.

    The snippet runs with ``__name__`` set to ``"__main__"`` so guarded
    script entry points fire. Any exception is caught and its string form
    returned; a clean run returns ``None``.

    SECURITY NOTE(review): ``exec`` on request-supplied source is arbitrary
    code execution — acceptable only in a sandboxed/disposable environment.
    """
    namespace = {"__name__": "__main__"}
    try:
        exec(code, namespace)
    except Exception as err:
        return str(err)
    return None


def check_python_code(code):
    """Compile and run *code* in a subprocess, classifying the outcome.

    Returns one of four status strings:
      * ``"SyntaxError"`` — the snippet does not compile.
      * ``"Infinite"``    — it ran past the 5 s budget and was killed.
      * ``"Error"``       — the subprocess could not be started/managed.
      * ``"Success"``     — it compiled and the subprocess finished in time.

    NOTE(review): exceptions raised *inside* the snippet are swallowed by
    execute_code in the child process, so a crashing snippet still reports
    "Success" — confirm that is intended.
    """
    try:
        # Syntax check only; nothing is executed at this point.
        compile(code, '<string>', 'exec')
    except SyntaxError:
        return "SyntaxError"

    # Run in a separate process so a runaway snippet can be terminated.
    try:
        process = multiprocessing.Process(target=execute_code, args=(code,))
        process.start()
        process.join(timeout=5)  # Adjust timeout as necessary
        if process.is_alive():
            # Still running after the budget: treat as non-terminating.
            process.terminate()
            process.join()
            return "Infinite"
    except Exception:
        # Spawning/managing the subprocess itself failed.
        # (The old `except timeout_decorator...TimeoutError` branch was dead:
        # Process.join() never raises it and child exceptions don't propagate.)
        return "Error"

    return "Success"


@app.route("/run", methods=["POST"])
def run():
    """POST /run — syntax- and runtime-check a Python snippet.

    Expects JSON ``{"code": "<python source>"}`` and returns
    ``{"response": <status>}`` where status is one of "SyntaxError",
    "Infinite", "Error", "Success" (see check_python_code).
    """
    # str() keeps the previous f-string template's coercion of non-string
    # payloads; the template itself added nothing else.
    code = str(request.get_json()["code"])
    return {"response": check_python_code(code)}

@app.route("/llm_api", methods=["POST"])
def llm_api():
    """POST /llm_api — grade an answer against a question via a fine-tuned LLM.

    Expects JSON ``{"question": ..., "answer": ...}``; returns
    ``{"response": "<first word of reply>", "feedback": "<rest of reply>"}``.
    """
    payload = request.get_json()
    prompt = f"""Ok, so here is the question: {payload["question"]} and here is the answer: {payload["answer"]}. Given this context, please check if this answer is correct based on the question. If it is not, please provide the correct answer. You have to give the output in the format <Correct/Incorrect> <Correct Answer/Positive Feedback>. For example in: question: print hello world statement, answer: print("hello world"), the output should be: Correct Good Job!. If the answer is incorrect, the output should be: Incorrect The correct answer is print("hello world")."""
    # print("API Ky", os.getenv('OPENAI_API'))
    client = OpenAI(api_key=os.getenv('OPENAI_API'))
    completion = client.chat.completions.create(
        model="ft:gpt-3.5-turbo-0125:personal::9GCHGPWm",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt},
        ],
    )
    reply = completion.choices[0].message.content
    # First word is the verdict; everything after the first space is feedback.
    # str.partition(" ") is exactly equivalent to the old split/join pair.
    verdict, _, feedback = reply.partition(" ")
    return {"response": verdict, "feedback": feedback}

@app.route("/llm_chatbot", methods=["POST"])
def llm_chatbot_msg():
    """POST /llm_chatbot — relay a free-form message to the fine-tuned LLM.

    Expects JSON ``{"message": ...}``; returns ``{"feedback": <reply text>}``.
    """
    client = OpenAI(api_key=os.getenv('OPENAI_API'))
    conversation = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": request.get_json()["message"]},
    ]
    completion = client.chat.completions.create(
        model="ft:gpt-3.5-turbo-0125:personal::9GCHGPWm",
        messages=conversation,
    )
    return {"feedback": completion.choices[0].message.content}


@app.route("/recommend_question", methods=['POST'])
def recommend():
    """POST /recommend_question — sample a question id weighted by the model.

    Expects JSON ``{"data": <feature vector>}``; returns a one-element JSON
    list containing a question number drawn from 1..21, weighted by the
    model's predicted distribution.
    """
    features = request.get_json()["data"]
    # Load the Keras model once and memoise it on the function object:
    # re-reading "model.keras" from disk on every request is expensive.
    model = getattr(recommend, "_model", None)
    if model is None:
        model = tf.keras.models.load_model("model.keras")
        recommend._model = model
    prediction = model.predict(np.array([features]))[0]
    questions = list(range(1, 22))
    # NOTE(review): random.choices requires len(weights) == len(population);
    # this assumes the model emits exactly 21 probabilities — confirm.
    choice = random.choices(questions, prediction, k=1)
    return choice

if __name__ == "__main__":
    # Development server only (debug=True enables the reloader/debugger);
    # use a production WSGI server for deployment.
    app.run(port=3002, debug=True)