# Music_LMMs / response.py
# NOTE: Hugging Face blob-viewer metadata (committer "fistyee", commit
# "update" d0c2b7c, raw/history/blame links, "3.44 kB") removed so this
# file parses as Python.
import os
import openai
# NOTE(review): music_search appears unused in this module — confirm it is
# imported for side effects (or by re-export) before removing.
import music_search
from dotenv import load_dotenv
from zhipuai import ZhipuAI
# Load environment variables
load_dotenv()
# Initialize OpenAI client
openai.api_key = os.getenv("OPENAI_API_KEY")
# Model name used by the OpenAI chat-completion helpers in this module.
engine = "gpt-4o-mini"
def get_gpt_response(messages, prompt):
    """Return a complete (non-streaming) chat completion from OpenAI.

    Appends *prompt* to *messages* in place, then calls the ChatCompletion
    API with the module-level ``engine`` model.

    Args:
        messages: Mutable list of chat-message dicts ({"role", "content"});
            the new user turn is appended in place.
        prompt: The user's message text.

    Returns:
        The assistant's reply text, or an ``"Error: ..."`` string on failure.
    """
    messages.append({"role": "user", "content": prompt})
    try:
        # stream=False: the API returns one complete response object, so
        # .choices[0].message.content is valid. (The original duplicate def
        # requested stream=True yet read message.content, and mixed `return`
        # with `yield`, discarding the return value — both fixed here.)
        response = openai.ChatCompletion.create(
            model=engine,
            messages=messages,
            temperature=0.2,
            max_tokens=4096,
            top_p=0.95,
            frequency_penalty=0,
            presence_penalty=0,
            stream=False,
        )
        return response.choices[0].message.content
    except Exception as e:
        # Errors come back to the caller as plain text, matching the
        # module's other helpers.
        return f"Error: {e}"
def get_gpt_response_stream(messages, prompt):
    """Stream a chat completion from OpenAI, yielding the growing reply.

    Appends the user *prompt* to *messages* in place, then yields the
    accumulated assistant text after every streamed delta (suited to
    incremental UI updates). Yields a single ``"Error: ..."`` string on
    failure.
    """
    messages.append({"role": "user", "content": prompt})
    accumulated = ""
    try:
        stream = openai.ChatCompletion.create(
            model=engine,
            messages=messages,
            temperature=0.2,
            max_tokens=4096,
            top_p=0.95,
            frequency_penalty=0,
            presence_penalty=0,
            stream=True,  # deltas arrive chunk by chunk
        )
        for chunk in stream:
            delta = chunk['choices'][0]['delta']
            if 'content' in delta:
                accumulated += delta['content']
                yield accumulated  # full text so far, not just the delta
    except Exception as e:
        yield f"Error: {e}"
def get_zhipuai_response_stream(messages, prompt):
    """Stream a chat completion from ZhipuAI, yielding the growing reply.

    Mirrors the OpenAI streaming helper: appends *prompt* to *messages* in
    place, then yields the accumulated assistant text after each streamed
    chunk. Yields a single ``"Error: ..."`` string on failure.
    """
    # SECURITY: the API key was hard-coded here. Prefer the environment;
    # the literal fallback only preserves existing behavior until the
    # leaked key is rotated.
    client = ZhipuAI(
        api_key=os.getenv(
            "ZHIPUAI_API_KEY",
            "423ca4c1f712621a4a1740bb6008673b.81aM7DNo2Ssn8FPA",  # TODO: rotate & remove
        )
    )
    messages.append({"role": "user", "content": prompt})
    response_text = ""
    try:
        response = client.chat.completions.create(
            model="glm-4-flash",
            messages=messages,
            stream=True,  # deltas arrive chunk by chunk
        )
        for chunk in response:
            content = chunk.choices[0].delta.content
            # Some chunks (role/finish markers) carry no text; skip them.
            if content:
                # Accumulate (the original overwrote response_text each
                # chunk) so callers see the full text so far, consistent
                # with the OpenAI streaming helper.
                response_text += content
                yield response_text
    except Exception as e:
        yield f"Error: {e}"
def get_zhipuai_response(messages, prompt):
    """Return a complete (non-streaming) chat completion from ZhipuAI.

    Appends *prompt* to *messages* in place and returns the assistant's
    full reply text (model ``glm-4-flash``), or an ``"Error: ..."`` string
    on failure.
    """
    # SECURITY: the API key was hard-coded here. Prefer the environment;
    # the literal fallback only preserves existing behavior until the
    # leaked key is rotated.
    client = ZhipuAI(
        api_key=os.getenv(
            "ZHIPUAI_API_KEY",
            "423ca4c1f712621a4a1740bb6008673b.81aM7DNo2Ssn8FPA",  # TODO: rotate & remove
        )
    )
    messages.append({"role": "user", "content": prompt})
    try:
        response = client.chat.completions.create(
            model="glm-4-flash",
            messages=messages,
            stream=False,  # single complete response object
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Error: {e}"