import httpx
import os
import requests
import gradio as gr
import openai
from fastapi import Depends, FastAPI, HTTPException, Request
from app.db import User, create_db_and_tables
from app.schemas import UserCreate, UserRead, UserUpdate
from app.users import auth_backend, current_active_user, fastapi_users
from dotenv import load_dotenv
import examples as chatbot_examples

# Get the current environment from the environment variable
current_environment = os.getenv("APP_ENV", "dev")

# Load the appropriate .env file based on the current environment
if current_environment == "dev":
    load_dotenv(".env.dev")
elif current_environment == "test":
    load_dotenv(".env.test")
elif current_environment == "prod":
    load_dotenv(".env.prod")
else:
    raise ValueError("Invalid environment specified")


# Log in against the app's own JWT auth route and return (success, token_or_error)
def api_login(email, password):
    port = os.getenv("APP_PORT")
    scheme = os.getenv("APP_SCHEME")
    host = os.getenv("APP_HOST")
    url = f"{scheme}://{host}:{port}/auth/jwt/login"
    payload = {"username": email, "password": password}
    headers = {"Content-Type": "application/x-www-form-urlencoded"}

    response = requests.post(url, data=payload, headers=headers)
    if response.status_code == 200:
        response_json = response.json()
        api_key = response_json["access_token"]
        return True, api_key
    else:
        response_json = response.json()
        detail = response_json["detail"]
        return False, detail


# Return the API base URL and an access token for the given credentials,
# or raise a Gradio error if the login fails
def get_api_key(email, password):
    successful, message = api_login(email, password)
    if successful:
        return os.getenv("APP_API_BASE"), message
    else:
        raise gr.Error(message)


# Define a function to get the AI's reply using the OpenAI API
def get_ai_reply(message, model="gpt-3.5-turbo", system_message=None, temperature=0, message_history=[]):
    # Initialize the messages list
    messages = []

    # Add the system message to the messages list
    if system_message is not None:
        messages += [{"role": "system", "content": system_message}]

    # Add the message history to the messages list
    if message_history is not None:
        messages += message_history

    # Add the user's message to the messages list
    messages += [{"role": "user", "content": message}]

    # Make an API call to the OpenAI ChatCompletion endpoint with the model and messages
    completion = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=temperature
    )

    # Extract and return the AI's response from the API response
    return completion.choices[0].message.content.strip()


# Define a function to handle the chat interaction with the AI model
def chat(model, system_message, message, chatbot_messages, history_state):
    # Initialize chatbot_messages and history_state if they are not provided
    chatbot_messages = chatbot_messages or []
    history_state = history_state or []

    # Try to get the AI's reply using the get_ai_reply function
    try:
        ai_reply = get_ai_reply(message, model=model, system_message=system_message, message_history=history_state)
    except Exception as e:
        # If an error occurs, raise a Gradio error
        raise gr.Error(str(e))

    # Append the user's message and the AI's reply to the chatbot_messages list
    chatbot_messages.append((message, ai_reply))

    # Append the user's message and the AI's reply to the history_state list
    history_state.append({"role": "user", "content": message})
    history_state.append({"role": "assistant", "content": ai_reply})

    # Return None (empty out the user's message textbox), the updated chatbot_messages, and the updated history_state
    return None, chatbot_messages, history_state


# Define a function to build the chatbot interface using Gradio
def get_chatbot_app(additional_examples=[]):
    # Load chatbot examples and merge with any additional examples provided
    examples = chatbot_examples.load_examples(additional=additional_examples)

    # Define a function to get the names of the examples
    def get_examples():
        return [example["name"] for example in examples]

    # Define a function to choose an example based on the index
    def choose_example(index):
        if index is not None:
            system_message = examples[index]["system_message"].strip()
            user_message = examples[index]["message"].strip()
            return system_message, user_message, [], []
        else:
            return "", "", [], []

    # Create the Gradio interface using the Blocks layout
    with gr.Blocks() as app:
        with gr.Tab("Conversation"):
            with gr.Row():
                with gr.Column():
                    # Create a dropdown to select examples
                    example_dropdown = gr.Dropdown(get_examples(), label="Examples", type="index")
                    # Create a button to load the selected example
                    example_load_btn = gr.Button(value="Load")
                    # Create a textbox for the system message (prompt)
                    system_message = gr.TextArea(label="System Message (Prompt)", value="You are a helpful assistant.", lines=20, max_lines=400)
                with gr.Column():
                    # Create a dropdown to select the AI model
                    model_selector = gr.Dropdown(
                        ["gpt-3.5-turbo"],
                        label="Model",
                        value="gpt-3.5-turbo"
                    )
                    # Create a chatbot interface for the conversation
                    chatbot = gr.Chatbot(label="Conversation")
                    # Create a textbox for the user's message
                    message = gr.Textbox(label="Message")
                    # Create a state object to store the conversation history
                    history_state = gr.State()
                    # Create a button to send the user's message
                    btn = gr.Button(value="Send")

                    # Connect the example load button to the choose_example function
                    example_load_btn.click(choose_example, inputs=[example_dropdown], outputs=[system_message, message, chatbot, history_state])
                    # Connect the send button to the chat function
                    btn.click(chat, inputs=[model_selector, system_message, message, chatbot, history_state], outputs=[message, chatbot, history_state])
        with gr.Tab("Get API Key"):
            email_box = gr.Textbox(label="Email Address", placeholder="Student Email")
            password_box = gr.Textbox(label="Password", type="password", placeholder="Student ID")
            btn = gr.Button(value="Generate")
            api_host_box = gr.Textbox(label="OpenAI API Base", interactive=False)
            api_key_box = gr.Textbox(label="OpenAI API Key", interactive=False)
            btn.click(get_api_key, inputs=[email_box, password_box], outputs=[api_host_box, api_key_box])

    # Return the app
    return app


app = FastAPI()

app.include_router(
    fastapi_users.get_auth_router(auth_backend), prefix="/auth/jwt", tags=["auth"]
)
app.include_router(
    fastapi_users.get_register_router(UserRead, UserCreate),
    prefix="/auth",
    tags=["auth"],
)
app.include_router(
    fastapi_users.get_users_router(UserRead, UserUpdate),
    prefix="/users",
    tags=["users"],
)


@app.get("/authenticated-route")
async def authenticated_route(user: User = Depends(current_active_user)):
    return {"message": f"Hello {user.email}!"}


@app.post("/v1/chat/completions")
async def openai_api_chat_completions_passthrough(
    request: Request,
    user: User = Depends(fastapi_users.current_user()),
):
    if not user:
        raise HTTPException(status_code=401, detail="Unauthorized")

    # Get the request data and headers
    request_data = await request.json()
    request_headers = request.headers
    openai_api_key = os.getenv("OPENAI_API_KEY")

    # Downgrade GPT-4 requests to gpt-3.5-turbo
    if request_data["model"] == "gpt-4" or request_data["model"] == "gpt-4-32k":
        print("User requested gpt-4, falling back to gpt-3.5-turbo")
        request_data["model"] = "gpt-3.5-turbo"

    # Forward the request to the OpenAI API
    response = requests.post(
        "https://api.openai.com/v1/chat/completions",
        json=request_data,
        headers={
            "Content-Type": request_headers.get("Content-Type"),
            "Authorization": f"Bearer {openai_api_key}",
        },
    )
    print(response)

    # Return the OpenAI API response
    return response.json()


@app.on_event("startup")
async def on_startup():
    # Not needed if you set up a migration system like Alembic
    await create_db_and_tables()


# Build the Gradio UI, protect it with the app's own login, and mount it under /gradio
gradio_gui = get_chatbot_app()
gradio_gui.auth = api_login
gradio_gui.auth_message = "Hello"
app = gr.mount_gradio_app(app, gradio_gui, path="/gradio")
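
# Usage notes (a hedged sketch, not part of the original file). The module path
# (app.main), host, port, and the example credentials below are assumptions; only
# the routes (/auth/jwt/login, /v1/chat/completions, /gradio) come from the code
# above. Serving the app locally might look like:
#
#   APP_ENV=dev uvicorn app.main:app --host 0.0.0.0 --port 8000
#
# Obtaining a JWT from the login route and calling the chat-completions
# passthrough with it could then look like:
#
#   import requests
#   base = "http://localhost:8000"
#   token = requests.post(
#       f"{base}/auth/jwt/login",
#       data={"username": "student@example.com", "password": "student-id"},
#   ).json()["access_token"]
#   reply = requests.post(
#       f"{base}/v1/chat/completions",
#       headers={"Authorization": f"Bearer {token}"},
#       json={"model": "gpt-3.5-turbo",
#             "messages": [{"role": "user", "content": "Hello"}]},
#   )
#   print(reply.json())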