Skip to content

Commit ba7dc6c

Browse files
Adam Balogh
authored and committed
Merge branch 'main' of github.com:OpenGradient/bluefin-agent
2 parents 21a5e2d + 97b02b9 commit ba7dc6c

7 files changed

Lines changed: 57 additions & 35 deletions

File tree

.env.example

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,5 +17,6 @@ export FIREBASE_CLIENT_EMAIL=''
1717
export FIREBASE_CLIENT_ID=''
1818
export FIREBASE_CLIENT_X509_CERT_URL=''
1919

20-
# To skip firebase token authentication on endpoints. DO NOT ENABLE IN PRODUCTION.
21-
export SKIP_FIREBASE_TOKEN_AUTH=false
20+
# Skip token authentication on endpoints.
21+
export SKIP_TOKEN_AUTH_HEADER=''
22+
export SKIP_TOKEN_AUTH_KEY=''

.github/workflows/deploy.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ jobs:
5050
5151
- name: AWS LINKS TO VIEW DEPLOYMENT
5252
run: |
53-
echo "View deployment progress here - https://us-east-2.console.aws.amazon.com/ecs/v2/clusters/Agents/services/two-ligma-agent-api/tasks?region=us-east-2"
53+
echo "View deployment progress here - https://us-east-2.console.aws.amazon.com/ecs/v2/clusters/Agents/services/two-ligma-api-service/tasks?region=us-east-2"
5454
# - name: Wait for deployment to complete
5555
# run: |
5656
# echo "Waiting for ECS deployment to complete..."

server/auth.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
from typing import Callable, TypeVar, cast
55
from flask import abort, request, jsonify, g
66
from .logging import logger
7-
from .config import SKIP_FIREBASE_TOKEN_AUTH
7+
from .config import SKIP_TOKEN_AUTH_HEADER, SKIP_TOKEN_AUTH_KEY
88

99
class FirebaseIDTokenData(BaseModel):
1010
uid: str
@@ -48,8 +48,10 @@ def protected_route():
4848
"""
4949
@wraps(f)
5050
def decorated_function(*args, **kwargs):
51-
if SKIP_FIREBASE_TOKEN_AUTH:
52-
return f(*args, **kwargs)
51+
if SKIP_TOKEN_AUTH_HEADER and SKIP_TOKEN_AUTH_KEY:
52+
skip_auth_header = request.headers.get(SKIP_TOKEN_AUTH_HEADER)
53+
if skip_auth_header and skip_auth_header == SKIP_TOKEN_AUTH_KEY:
54+
return f(*args, **kwargs)
5355

5456
auth_header = request.headers.get('Authorization')
5557
if not auth_header or not auth_header.startswith('Bearer '):

server/config.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
import os
22
import logging
3-
from typing import Literal
43

5-
SKIP_FIREBASE_TOKEN_AUTH = bool(os.getenv("SKIP_FIREBASE_TOKEN_AUTH", False))
4+
SKIP_TOKEN_AUTH_HEADER = os.getenv("SKIP_TOKEN_AUTH_HEADER")
5+
SKIP_TOKEN_AUTH_KEY = os.getenv("SKIP_TOKEN_AUTH_KEY")
66

77
# See if we are running in subnet mode
88
SUBNET_MODE = os.getenv("subnet_mode", "false").lower() == "true"

server/firebase.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import os
22
import firebase_admin # type: ignore[import-untyped]
33
from firebase_admin import auth # noqa: F401
4+
from dotenv import dotenv_values
45

56
def validate_firebase_env_vars():
67
"""
@@ -24,6 +25,13 @@ def validate_firebase_env_vars():
2425
# Get and process the private key to handle newlines
2526
private_key = os.environ.get("FIREBASE_PRIVATE_KEY").replace("\\n", "\n")
2627

28+
# Sometimes, when loading multiline env variables from a .env file,
29+
# Only the -----BEGIN PRIVATE KEY----- is loaded (\n causes issues).
30+
# This handles this case.
31+
if '\n' not in private_key:
32+
config = dotenv_values()
33+
private_key = config["FIREBASE_PRIVATE_KEY"]
34+
2735
return (
2836
os.environ.get("FIREBASE_PROJECT_ID"),
2937
os.environ.get("FIREBASE_PRIVATE_KEY_ID"),

subnet/subnet_methods.py

Lines changed: 37 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -41,9 +41,12 @@ def make_request(input_data: Dict[str, Any], endpoint: str) -> requests.Response
4141
)
4242

4343

44+
import time
45+
46+
4447
def subnet_evaluation(quant_query: QuantQuery, quant_response: QuantResponse) -> float:
4548
"""
46-
Evaluate the subnet miner query based on the provided QuantQuery and QuantResponse.
49+
Evaluate the subnet miner query based on the provided QuantQuery and QuantResponse, with up to 3 retries on failure.
4750
4851
Args:
4952
quant_query (QuantQuery): The query object containing the query string and metadata.
@@ -56,31 +59,39 @@ def subnet_evaluation(quant_query: QuantQuery, quant_response: QuantResponse) ->
5659
if evaluation_model is None:
5760
evaluation_model = create_evaluation_model()
5861

59-
try:
60-
template = env.get_template("evaluation_prompt.txt")
61-
prompt = template.render(
62-
user_prompt=quant_query.query,
63-
agent_answer="No response provided" if quant_response is None else quant_response.response)
64-
65-
# Format messages properly for ChatOpenAI
66-
messages = [{"role": "user", "content": prompt}]
67-
response = evaluation_model.invoke(messages)
68-
69-
# Parse the response
70-
answer = response.content if hasattr(response, "content") else response["content"]
71-
72-
# Find ```json{{...}}``` in the answer
73-
match = re.search(r"```json\s*({.*})\s*```", answer, re.DOTALL)
74-
if not match:
75-
logging.error(f"Could not find JSON in model response: {answer}")
76-
return 0.0
77-
json_str = match.group(1)
78-
score = json.loads(json_str)["score"]
79-
# Normalize the score to be between 0 and 1
80-
return float(score) / 50
81-
except Exception as e:
82-
logging.error(f"subnet_evaluation error: {e}")
83-
return 0.0
62+
retries = 3
63+
delay = 3.0
64+
last_exception = None
65+
for attempt in range(1, retries + 1):
66+
try:
67+
template = env.get_template("evaluation_prompt.txt")
68+
prompt = template.render(
69+
user_prompt=quant_query.query,
70+
agent_answer="No response provided" if quant_response is None else quant_response.response)
71+
72+
# Format messages properly for ChatOpenAI
73+
messages = [{"role": "user", "content": prompt}]
74+
response = evaluation_model.invoke(messages)
75+
76+
# Parse the response
77+
answer = response.content if hasattr(response, "content") else response["content"]
78+
79+
# Find ```json{{...}}``` in the answer
80+
match = re.search(r"```json\s*({.*})\s*```", answer, re.DOTALL)
81+
if not match:
82+
logging.error(f"Could not find JSON in model response: {answer}")
83+
return 0.0
84+
json_str = match.group(1)
85+
score = json.loads(json_str)["score"]
86+
# Normalize the score to be between 0 and 1
87+
return float(score) / 50
88+
except Exception as e:
89+
last_exception = e
90+
logging.error(f"subnet_evaluation attempt {attempt} failed: {e}")
91+
if attempt < retries:
92+
time.sleep(delay)
93+
logging.error(f"subnet_evaluation failed after {retries} attempts: {last_exception}")
94+
return 0.0
8495

8596

8697
def subnet_query(quant_query: QuantQuery) -> QuantResponse:

testclient/client.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ def main():
3636
tokens = agent_output.get("tokens", [])
3737

3838
# print results
39-
print(f"Two-Ligma: {answer}")
39+
print(f"BitQuant: {answer}")
4040
if pools:
4141
print(f"Pools: {pools}")
4242
if tokens:

0 commit comments

Comments (0)