import { trackOpenAiFreeUsage } from "https://esm.town/v/snm/trackOpenAiFreeUsage";
import { openAiTextCompletion } from "https://esm.town/v/patrickjm/openAiTextCompletion?v=8";
import { openAiModeration } from "https://esm.town/v/snm/openAiModeration";
import { openAiFreeQuotaExceeded } from "https://esm.town/v/patrickjm/openAiFreeQuotaExceeded?v=2";
import { openAiFreeUsageConfig } from "https://esm.town/v/snm/openAiFreeUsageConfig";
/**
 * Runs an OpenAI text completion (model: text-davinci-003) against a prompt.
 *
 * When `openAiKey` is omitted, the shared free-usage key is used instead; in
 * that mode the call is subject to (a) a shared quota check, (b) mandatory
 * moderation of the prompt, and (c) best-effort usage tracking. Callers who
 * supply their own key skip all three.
 *
 * @param params.openAiKey Optional caller-supplied OpenAI API key.
 * @param params.prompt    Prompt text to complete.
 * @param params.maxTokens Max tokens to generate (default 1000).
 * @returns The first completion choice's text, trimmed, or `undefined` if the
 *          API returned no choices.
 * @throws Error if the shared free quota is exhausted, or if moderation flags
 *         the prompt (free-key mode only).
 */
export let gpt3 = async (params: {
  openAiKey?: string,
  prompt: string,
  maxTokens?: number,
}): Promise<string | undefined> => {
  const MODEL = "text-davinci-003";
  const usingFreeKey = !params.openAiKey;
  const apiKey = params.openAiKey ?? openAiFreeUsageConfig.key;

  // Only consult the shared quota when the shared key is actually in use;
  // callers with their own key should never pay for (or fail on) this check.
  if (usingFreeKey && await openAiFreeQuotaExceeded()) {
    throw new Error(openAiFreeUsageConfig.quota_error);
  }

  // Moderation is enforced only for the shared free key.
  if (usingFreeKey) {
    const moderation = await openAiModeration({
      apiKey,
      input: params.prompt,
    });
    if (moderation.results.some((r) => r.flagged)) {
      throw new Error(
        "Sorry, this prompt was flagged by OpenAI moderation. If you provide your own API key, moderation will be turned off."
      );
    }
  }

  const response = await openAiTextCompletion({
    apiKey,
    prompt: params.prompt,
    model: MODEL,
    max_tokens: params.maxTokens ?? 1000,
  });

  // Usage tracking is best-effort: a tracking failure must not discard a
  // successful completion, but it should leave a trace instead of vanishing.
  if (usingFreeKey) {
    try {
      await trackOpenAiFreeUsage(MODEL, response.usage.total_tokens);
    } catch (e: unknown) {
      console.error("trackOpenAiFreeUsage failed:", e);
    }
  }

  return response.choices?.[0]?.text?.trim();
};