"use server"

import type { ChatCompletionMessageParam } from "openai/resources/chat"
import OpenAI from "openai"

// read once at module load; falls back to an empty string if unset
const openaiApiKey = process.env.AUTH_OPENAI_API_KEY || ""
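
// For reference (an assumption about the deployment, not part of this file):
// the three environment variables read by this module could live in .env.local,
// with values like these placeholders:
//
//   AUTH_OPENAI_API_KEY="sk-..."
//   LLM_OPENAI_API_BASE_URL="https://api.openai.com/v1"
//   LLM_OPENAI_API_MODEL="gpt-3.5-turbo"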

export async function predictWithOpenAI(inputs: string) {
  const openaiApiBaseUrl = process.env.LLM_OPENAI_API_BASE_URL || "https://api.openai.com/v1"
  const openaiApiModel = process.env.LLM_OPENAI_API_MODEL || "gpt-3.5-turbo"

  const openai = new OpenAI({
    apiKey: openaiApiKey,
    baseURL: openaiApiBaseUrl,
  })

  const messages: ChatCompletionMessageParam[] = [
    // the caller's full prompt is passed as the system message
    { role: "system", content: inputs },
  ]
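  // An alternative shape worth noting (an assumption, not what this action does):
  // a fixed system prompt with the caller's text as the user turn, e.g.
  //
  //   { role: "system", content: "You are a helpful assistant." },
  //   { role: "user", content: inputs },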

  try {
    const res = await openai.chat.completions.create({
      messages,
      stream: false,
      model: openaiApiModel,
      temperature: 0.8,
    })

    // `content` is typed string | null, so guard against a null completion
    return res.choices[0]?.message?.content || ""
  } catch (err) {
    console.error(`error during generation: ${err}`)
    // return an empty string so callers always receive a string
    return ""
  }
}
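
// Usage sketch (an assumption, not part of the original file): because this is a
// server action, a client component can import and call it directly. The import
// path and component name below are hypothetical.
//
//   "use client"
//
//   import { useState } from "react"
//   import { predictWithOpenAI } from "@/app/actions/predictWithOpenAI"
//
//   export function PromptForm() {
//     const [output, setOutput] = useState("")
//     const run = async () => setOutput(await predictWithOpenAI("Write a haiku about the sea"))
//     return (
//       <div>
//         <button onClick={run}>Generate</button>
//         <p>{output}</p>
//       </div>
//     )
//   }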