File size: 3,437 Bytes
d0c2b7c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
import os
import openai
import music_search
from dotenv import load_dotenv
from zhipuai import ZhipuAI

# Load environment variables
load_dotenv()
# Initialize OpenAI client
openai.api_key = os.getenv("OPENAI_API_KEY")
engine = "gpt-4o-mini"

def get_gpt_response_stream(messages, prompt):
    messages.append({"role": "user", "content": prompt})

    # Use OpenAI API for response
    try:
        response =  openai.ChatCompletion.create(
            model=engine,
            messages=messages,
            temperature=0.2,
            max_tokens=4096,
            top_p=0.95,
            frequency_penalty=0,
            presence_penalty=0,
            stream=True  # Enable streaming
        )
        return response.choices[0].message.content
               
    except Exception as e:
        yield f"Error: {e}"

def get_gpt_response_stream(messages, prompt):
    messages.append({"role": "user", "content": prompt})
    response_text = ""

    # Use OpenAI API for streaming response
    try:
        for chunk in openai.ChatCompletion.create(
            model=engine,
            messages=messages,
            temperature=0.2,
            max_tokens=4096,
            top_p=0.95,
            frequency_penalty=0,
            presence_penalty=0,
            stream=True  # Enable streaming
        ):
            if 'content' in chunk['choices'][0]['delta']:
                response_text += chunk['choices'][0]['delta']['content']
                yield response_text  # Yield response incrementally
    except Exception as e:
        yield f"Error: {e}"


def get_zhipuai_response_stream(messages, prompt):
    print("Inside get_zhipuai_response")
    client = ZhipuAI(api_key="423ca4c1f712621a4a1740bb6008673b.81aM7DNo2Ssn8FPA")
    messages.append({"role": "user", "content": prompt})
    response_text = ""

    # Use ZhipuAI API for streaming response
    try:
        response = client.chat.completions.create(
            model="glm-4-flash",
            messages=messages,
            stream=True  # Enable streaming
        )
        print("Response received from ZhipuAI")
        print(response)
        for chunk in response:
            print(f"Chunk received: {chunk}")  # Log each chunk
            response_text = chunk.choices[0].delta.content
            print(response_text)
            yield response_text  # Yield response incrementally
    except Exception as e:
        print(f"Error in get_zhipuai_response_stream: {e}")
        yield f"Error: {e}"

def get_zhipuai_response(messages, prompt):
    print("Inside get_zhipuai_response")  # Confirming entry into the function
    client = ZhipuAI(api_key="423ca4c1f712621a4a1740bb6008673b.81aM7DNo2Ssn8FPA")
    
    messages.append({"role": "user", "content": prompt})

    print("Messages prepared:", messages)  # Log messages

    response_text = ""
    
    # Non-streaming test
    try:
        print("Calling ZhipuAI API...")  # Log before API call
        response = client.chat.completions.create(
            model="glm-4-flash",
            messages=messages,
            stream=False  # Disable streaming for this test
        )
        print("Response received from ZhipuAI")  # Log response retrieval
        response_text = response.choices[0].message.content
        return response_text
        
    except Exception as e:
        print(f"Error in get_zhipuai_response: {e}")  # More informative error message
        return f"Error: {e}"