Update app.py
app.py CHANGED
@@ -1,21 +1,13 @@
 import os
 import threading
-import subprocess
 import gradio as gr
 from llama_index.llms.ollama import Ollama
 from llama_index.core.llms import ChatMessage
 
-#
-
-
-os.environ['OLLAMA_ORIGINS'] = '*'
-subprocess.run(["ollama", "serve"])
+# Configure the Ollama proxy
+os.environ['OLLAMA_HOST'] = '127.0.0.1:11434'
+os.environ['OLLAMA_ORIGINS'] = '*'
 
-# Start the Ollama server in a separate thread
-ollama_thread = threading.Thread(target=start_ollama)
-ollama_thread.start()
-
-# Initialize the Ollama model
 llm = Ollama(model="llama3", request_timeout=120.0)
 
 def get_completion(prompt):
@@ -50,4 +42,4 @@ chat_interface = gr.Interface(fn=generate_chat_response, inputs=chat_input, outp
 app = gr.TabbedInterface([single_interface, chat_interface], ["Single Completion", "Chat"])
 
 if __name__ == "__main__":
-    app.launch(server_name="0.0.0.0", server_port=7860)
+    app.launch(server_name="0.0.0.0", server_port=7860, share=True)
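
The removed startup block calls subprocess.run(["ollama", "serve"]) at module level, which blocks until the server exits, and then starts a thread targeting start_ollama, a function the shown lines never define. If a Space did need to launch Ollama itself rather than relying on an endpoint configured through OLLAMA_HOST, a minimal sketch of that intent (assuming the ollama binary is available on the PATH) could look like:

import subprocess
import threading

def start_ollama():
    # subprocess.run blocks until `ollama serve` exits, so it must run
    # in a background thread rather than at module level.
    subprocess.run(["ollama", "serve"])

# Daemon thread: the server process is tied to the app's lifetime.
ollama_thread = threading.Thread(target=start_ollama, daemon=True)
ollama_thread.start()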
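
The diff elides the handler and interface definitions between the imports and the launch call. A hypothetical reconstruction of the updated app.py, with handler bodies and Textbox labels that are assumptions (only the imports, identifiers, and launch arguments above come from the file), could look roughly like this:

import os
import gradio as gr
from llama_index.llms.ollama import Ollama
from llama_index.core.llms import ChatMessage

# Configure the Ollama proxy
os.environ['OLLAMA_HOST'] = '127.0.0.1:11434'
os.environ['OLLAMA_ORIGINS'] = '*'

llm = Ollama(model="llama3", request_timeout=120.0)

def get_completion(prompt):
    # Single-shot completion against the local Ollama model.
    return str(llm.complete(prompt))

def generate_chat_response(message):
    # Wrap the user input in a ChatMessage and return the model's reply.
    response = llm.chat([ChatMessage(role="user", content=message)])
    return response.message.content

single_interface = gr.Interface(
    fn=get_completion,
    inputs=gr.Textbox(label="Prompt"),
    outputs=gr.Textbox(label="Completion"),
    title="Single Completion",
)

chat_input = gr.Textbox(label="Message")
chat_output = gr.Textbox(label="Response")
chat_interface = gr.Interface(
    fn=generate_chat_response,
    inputs=chat_input,
    outputs=chat_output,
    title="Chat",
)

app = gr.TabbedInterface([single_interface, chat_interface], ["Single Completion", "Chat"])

if __name__ == "__main__":
    app.launch(server_name="0.0.0.0", server_port=7860, share=True)

Note that share=True asks Gradio to open a public share link in addition to serving on port 7860, and OLLAMA_HOST / OLLAMA_ORIGINS are read by Ollama itself; the Ollama LLM class here still targets its default localhost:11434 endpoint unless a base_url is passed.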