# app.py — Gradio front-end for a locally served Ollama "llama3" model.
# Origin: Hugging Face Space by papasega (commit 3ebac32, "Update app.py", 1.61 kB).
import os
import threading
import gradio as gr
from llama_index.llms.ollama import Ollama
from llama_index.core.llms import ChatMessage
# Configure the Ollama client endpoint: talk to a local Ollama server on the
# default port, and allow cross-origin requests to it.
os.environ['OLLAMA_HOST'] = '127.0.0.1:11434'
os.environ['OLLAMA_ORIGINS'] = '*'
# Shared LLM handle used by every callback below; llama3 model, 120 s request timeout.
llm = Ollama(model="llama3", request_timeout=120.0)
def get_completion(prompt):
    """Run a single-turn completion against the shared llama3 model.

    Args:
        prompt: Free-form prompt text from the UI.

    Returns:
        The completion as a plain string. The original returned the raw
        ``CompletionResponse`` object; the Gradio Textbox output expects
        text, so unwrap it via its ``text`` attribute.
    """
    response = llm.complete(prompt)
    return response.text
def chat_with_llm(messages):
    """Send a multi-turn conversation to the shared LLM.

    Args:
        messages: Sequence of ``{"role": ..., "content": ...}`` dicts in
            conversation order.

    Returns:
        The raw ChatResponse from llama_index; the caller unwraps it.
    """
    converted = [
        ChatMessage(role=entry["role"], content=entry["content"])
        for entry in messages
    ]
    return llm.chat(converted)
def generate_response(prompt):
    """Gradio callback for the "Single Completion" tab.

    Delegates directly to :func:`get_completion` with the user's prompt.
    """
    return get_completion(prompt)
def generate_chat_response(history):
    """Gradio callback for the "Chat" tab.

    Args:
        history: Chatbot history as ``[user_message, bot_message]`` pairs;
            the bot slot of the most recent pair may be empty/None while the
            reply is pending.

    Returns:
        The assistant's reply text for the latest user message.
    """
    # Seed the conversation with a fixed persona, then replay the history.
    messages = [{"role": "system", "content": "You are a pirate with a colorful personality"}]
    for user_msg, bot_msg in history:
        messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})
    response = chat_with_llm(messages)
    # Bug fix: ChatResponse is not subscriptable, so response["content"]
    # raised TypeError. The reply text lives at response.message.content.
    return response.message.content
# --- Gradio UI wiring ---
# Tab 1: one-shot prompt in, completion text out.
single_input = gr.Textbox(lines=2, placeholder="Enter your prompt here...")
single_output = gr.Textbox()
single_interface = gr.Interface(fn=generate_response, inputs=single_input, outputs=single_output, title="LLM Single Completion")
# Tab 2: chat history in, reply text out.
# NOTE(review): a Chatbot component used as an *input* to gr.Interface is
# unusual — confirm the installed Gradio version supports this wiring.
chat_input = gr.Chatbot()
chat_output = gr.Textbox()
chat_interface = gr.Interface(fn=generate_chat_response, inputs=chat_input, outputs=chat_output, title="LLM Chat")
# Combine both tabs into a single app object (launched by the guard below).
app = gr.TabbedInterface([single_interface, chat_interface], ["Single Completion", "Chat"])
if __name__ == "__main__":
    # Bind to all interfaces on port 7860 (the Hugging Face Spaces default)
    # and request a public share link.
    app.launch(server_name="0.0.0.0", server_port=7860, share=True)