backend/src/utils/openai/config.ts: 85 changes (49 additions, 36 deletions)
@@ -2,100 +2,113 @@ import OpenAI from "openai";
import { HistoryData } from "../../services/copilot.services";
import getSecrets from "../getSecrets";

function mask(s?: string) {
return s ? s.slice(0, 6) + "..." : s;
}

function assertModelLooksRight(label: string, model?: string) {
if (!model) throw new Error(`${label} is empty`);
if (model.startsWith("sk-")) {
throw new Error(`${label} looks like an API key. Check your env or getSecrets mapping`);
}
}

export const chatCompletion = async ({
history = [],
model = "gpt-4",
model, // optional override
format = "json",
}: {
history: HistoryData[];
model: string;
model?: string;
format?: string;
}) => {
try {


const MODEL_API_KEY_LARGE = await getSecrets("MODEL_API_KEY_LARGE");
const LLM_MODEL = await getSecrets("MODEL_LARGE");
const LLM_MODEL_FROM_SECRETS = await getSecrets("MODEL_LARGE"); // ex: gpt-4o
const MODEL_BASE_PATH_LARGE = await getSecrets("MODEL_BASE_PATH_LARGE");

const resolvedModel = model ?? LLM_MODEL_FROM_SECRETS;
assertModelLooksRight("MODEL_LARGE", resolvedModel);

const openai = new OpenAI({
apiKey: MODEL_API_KEY_LARGE,
baseURL: MODEL_BASE_PATH_LARGE || undefined,
});

if (MODEL_BASE_PATH_LARGE) {
openai.baseURL = MODEL_BASE_PATH_LARGE;
}

const completionConfig: any = {
model: LLM_MODEL,
model: resolvedModel,
messages: history,
temperature: 0.75,
// omit temperature when using strict JSON output to avoid model constraints
};

if (format === "json") {
completionConfig.response_format = { type: "json_object" };
} else {
completionConfig.temperature = 0.7;
}

console.log("completionConfig", completionConfig)
console.log("completionConfig", {
model: completionConfig.model,
baseURL: MODEL_BASE_PATH_LARGE,
apiKey: mask(MODEL_API_KEY_LARGE),
format,
});

const response = await openai.chat.completions.create(completionConfig);

const content = response.choices[0].message.content;

console.log("response", content)
const content = response.choices[0]?.message?.content ?? "";
const usage = response.usage;

return {
content,
usage,
};
return { content, usage };
} catch (error) {
console.log(error);
}
};

export const chatCompletion3 = async ({
history = [],
model = "gpt-3.5-turbo-1106",
model,
format = "json",
}: {
history: HistoryData[];
model: string;
model?: string;
format?: string;
}) => {
try {

const MODEL_API_KEY_SMALL = await getSecrets("MODEL_API_KEY_SMALL");
const MODEL_SMALL = await getSecrets("MODEL_API_KEY");
const MODEL_SMALL_FROM_SECRETS = await getSecrets("MODEL_SMALL"); // fixed here
const MODEL_BASE_PATH_SMALL = await getSecrets("MODEL_BASE_PATH_SMALL");

const resolvedModel = model ?? MODEL_SMALL_FROM_SECRETS;
assertModelLooksRight("MODEL_SMALL", resolvedModel);

const openai = new OpenAI({
apiKey: MODEL_API_KEY_SMALL,
baseURL: MODEL_BASE_PATH_SMALL || undefined,
});

if (MODEL_BASE_PATH_SMALL) {
openai.baseURL = MODEL_BASE_PATH_SMALL;
}

const completionConfig: any = {
model: MODEL_SMALL,
model: resolvedModel,
messages: history,
temperature: 0.75,
};

if (format === "json") {
completionConfig.response_format = { type: "json_object" };
} else {
completionConfig.temperature = 0.7;
}

const response = await openai.chat.completions.create(completionConfig);
console.log("completionConfig3", {
model: completionConfig.model,
baseURL: MODEL_BASE_PATH_SMALL,
apiKey: mask(MODEL_API_KEY_SMALL),
format,
});

const content = response.choices[0].message.content;
const response = await openai.chat.completions.create(completionConfig);
const content = response.choices[0]?.message?.content ?? "";
const usage = response.usage;

return {
content,
usage,
};
return { content, usage };
} catch (error) {
console.log(error);
}
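For reviewers skimming the new signature, a minimal usage sketch follows (not part of the diff). The import path, the summarize() wrapper, and the exact HistoryData message shape are assumptions for illustration; only the { history, model?, format } parameters and the { content, usage } return value come from config.ts as changed above.

// Hypothetical caller, e.g. backend/src/services/summary.service.ts
// Import path is an assumption based on the repo layout shown in the diff.
import { chatCompletion } from "../utils/openai/config";

export async function summarize(text: string) {
  // No model argument: chatCompletion falls back to the MODEL_LARGE secret
  // (e.g. gpt-4o) and assertModelLooksRight rejects values that look like
  // an API key. Pass { model: "..." } to override per call.
  const result = await chatCompletion({
    history: [
      // Message shape assumed to be { role, content }, matching HistoryData.
      { role: "system", content: 'Reply with a JSON object: {"summary": string}' },
      { role: "user", content: text },
    ],
    format: "json", // default shown above, spelled out for clarity
  });

  // Both helpers swallow errors in their catch block and return undefined,
  // so guard before parsing the JSON content.
  if (!result?.content) return null;
  return JSON.parse(result.content);
}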