import os
from typing import Any, Dict, List

import openai
import requests
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI as l_OpenAI

OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
SERPAPI_API_KEY = os.environ["SERPAPI_API_KEY"]
openai_client = openai.OpenAI(api_key=OPENAI_API_KEY)


def call_chatgpt(query: str, model: str = "gpt-3.5-turbo") -> str:
    """
    Generates a response to a query using the specified language model.

    Args:
        query (str): The user's query that needs to be processed.
        model (str, optional): The language model to be used. Defaults to "gpt-3.5-turbo".

    Returns:
        str: The generated response to the query.
    """

    # Prepare the conversation context with system and user messages.
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": f"Question: {query}."},
    ]

    # Use the OpenAI client to generate a response based on the model and the conversation context.
    response = openai_client.chat.completions.create(
        model=model,
        messages=messages,
    )

    # Extract the message content from the first choice; the field can be
    # None in edge cases (e.g. content filtering), so fall back to "".
    content: str = response.choices[0].message.content or ""

    # Return the generated content.
    return content

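# A minimal usage sketch for call_chatgpt (hypothetical prompt; assumes
# OPENAI_API_KEY is set, and the model output will naturally vary):
#
#     answer = call_chatgpt("Explain what a language model is in one sentence.")
#     print(answer)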

def call_langchain(prompt: str) -> str:
    """
    Initializes a language model with specific settings, loads additional tools, initializes an agent with these tools,
    and then runs the agent with a given prompt to produce a text response.

    Args:
    prompt (str): The input text prompt that the agent will process.

    Returns:
    str: The text output produced by the agent after processing the input prompt.
    """

    # Initialize the LangChain OpenAI LLM wrapper with deterministic output
    # (temperature=0), using the module-level OpenAI API key.
    llm = l_OpenAI(temperature=0, openai_api_key=OPENAI_API_KEY)  # type: ignore

    # Load the tools the agent can call: 'serpapi' for web search results and
    # 'llm-math' for arithmetic, passing the SerpAPI key where required.
    tools = load_tools(  # type: ignore
        ["serpapi", "llm-math"], llm=llm, serpapi_api_key=SERPAPI_API_KEY
    )

    # Build a zero-shot ReAct agent over those tools. Note that
    # `initialize_agent` takes the agent type via its `agent` parameter;
    # an unknown `agent_type` keyword is rejected by the AgentExecutor.
    agent = initialize_agent(  # type: ignore
        tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
    )

    # Run the agent on the prompt and capture its final text answer.
    output: str = agent.run(prompt)

    return output

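# A usage sketch for call_langchain (hypothetical prompt; assumes both
# OPENAI_API_KEY and SERPAPI_API_KEY are set, and prints intermediate agent
# steps because verbose=True):
#
#     result = call_langchain("Who won the 2022 FIFA World Cup?")
#     print(result)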

def query(payload: Dict[str, Any]) -> List[Dict[str, Any]]:
    """
    Sends a JSON payload to a predefined Hugging Face inference endpoint and
    returns the parsed JSON response.

    Args:
        payload (Dict[str, Any]): The JSON payload to be sent to the API.

    Returns:
        List[Dict[str, Any]]: The parsed JSON response. Text-generation
        endpoints return a list of dicts, each with a 'generated_text' key.
    """

    # Endpoint URL of the model hosted on Hugging Face.
    API_URL = "https://sks7h7h5qkhoxwxo.us-east-1.aws.endpoints.huggingface.cloud"

    # Headers declaring that both the request and the response are JSON.
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
    }

    # Send a POST request with the JSON payload and headers, surfacing HTTP
    # errors early rather than failing later on a malformed body.
    response = requests.post(API_URL, headers=headers, json=payload)
    response.raise_for_status()

    # Return the parsed JSON body.
    return response.json()

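# A usage sketch for `query` (the payload follows the Hugging Face
# text-generation schema; the response shape shown is illustrative):
#
#     out = query({"inputs": "Hello", "parameters": {"max_new_tokens": 32}})
#     # out -> [{"generated_text": "..."}]
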
def llama2_7b_ysa(prompt: str) -> str:
    """
    Queries the fine-tuned LLaMA 2 7B model and retrieves the generated text
    for the given prompt.

    The request goes through the module-level `query` helper, so the response
    is expected to be a list of dictionaries whose first element contains a
    'generated_text' key.

    Args:
        prompt (str): The text prompt to send to the model.

    Returns:
        str: The generated text response from the model.
    """

    # Build the payload; 'parameters' is left empty here but can carry
    # generation options (e.g. max_new_tokens) supported by the endpoint.
    query_payload: Dict[str, Any] = {
        "inputs": prompt,
        "parameters": {},
    }

    # Send the query to the model endpoint.
    output = query(query_payload)

    # Extract the 'generated_text' from the first item in the response list.
    response: str = output[0]["generated_text"]

    return response

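# A usage sketch for llama2_7b_ysa (hypothetical prompt; assumes the endpoint
# referenced in `query` is deployed and reachable):
#
#     text = llama2_7b_ysa("Tell me about the organization.")
#     print(text)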

def llama2_7b_brk_letters(prompt: str) -> str:
    """
    Queries the LLaMA 2 7B model hosted on a specific Hugging Face endpoint with a given prompt,
    and returns the generated text as a response.

    Args:
        prompt (str): The input text prompt to be sent to the LLaMA 2 7B model for generating text.

    Returns:
        str: The text generated by the LLaMA 2 7B model in response to the input prompt.
    """
    
    # Endpoint URL of the LLaMA 2 7B model hosted on Hugging Face.
    API_URL = "https://hd1rl4q31aom5qwc.us-east-1.aws.endpoints.huggingface.cloud"
    
    # Headers to include in the HTTP request. Specifies the expected format of the response and request.
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json"
    }
    
    def query_llama2_7b_brk_letters(payload: dict) -> list:
        """
        Sends a POST request to the LLaMA 2 7B API endpoint with a given payload.

        Args:
            payload (dict): The data to be sent in the POST request, including
            the input prompt and any parameters for the model.

        Returns:
            list: The parsed JSON response; text-generation endpoints return
            a list of dicts, each with a 'generated_text' key.
        """
        response = requests.post(API_URL, headers=headers, json=payload)
        # Surface HTTP errors early, mirroring the module-level `query` helper.
        response.raise_for_status()
        return response.json()
    
    # Send the prompt to the endpoint; 'parameters' is left empty here but
    # can carry generation options supported by the model.
    output = query_llama2_7b_brk_letters({
        "inputs": prompt,
        "parameters": {},
    })

    # Extracts the generated text from the API's response.
    response = output[0]['generated_text']

    return response
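

if __name__ == "__main__":
    # Minimal smoke test, assuming OPENAI_API_KEY is set. The endpoint-backed
    # helpers (llama2_7b_ysa, llama2_7b_brk_letters) are not exercised here
    # because they require the Hugging Face endpoints above to be live.
    print(call_chatgpt("Say hello in one short sentence."))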