Update app.py
app.py CHANGED
@@ -1,28 +1,21 @@
-import httpx
-from llama_index.llms.ollama import Ollama
-from llama_index.core.llms import ChatMessage
-import gradio as gr
-
 import os
 import threading
 import subprocess
+import gradio as gr
+from llama_index.llms.ollama import Ollama
+from llama_index.core.llms import ChatMessage
 
-
-os.environ['OLLAMA_HOST'] = '127.0.0.1:11434'
-os.environ['OLLAMA_ORIGINS'] = '*'
-subprocess.
+# Function to start the Ollama server
+def start_ollama():
+    os.environ['OLLAMA_HOST'] = '127.0.0.1:11434'
+    os.environ['OLLAMA_ORIGINS'] = '*'
+    subprocess.run(["ollama", "serve"])
 
-
+# Start the Ollama server in a separate thread
+ollama_thread = threading.Thread(target=start_ollama)
 ollama_thread.start()
 
-
-
-
-proxies = {
-    "http://": "http://localhost:11434",
-    "https://": "http://localhost:11434",
-}
-
+# Initialize the Ollama model
 llm = Ollama(model="llama3", request_timeout=120.0)
 
 def get_completion(prompt):
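The removed `proxies` mapping (an httpx-style proxy table pointing every scheme at the local Ollama port) and the `import httpx` that went with it were dead weight once `Ollama(...)` talks to its default local base URL directly. What the new startup code still does not do is wait for `ollama serve` to accept connections before `llm` issues its first request. Below is a minimal sketch of the same pattern with a readiness poll added, assuming the server's root endpoint at `http://127.0.0.1:11434` answers 200 once it is up; the `wait_for_ollama` helper and the `daemon=True` flag are illustrations, not part of the commit:

```python
import os
import subprocess
import threading
import time
import urllib.request

def start_ollama():
    # Same environment setup as the commit: bind to localhost, allow all origins.
    os.environ['OLLAMA_HOST'] = '127.0.0.1:11434'
    os.environ['OLLAMA_ORIGINS'] = '*'
    subprocess.run(["ollama", "serve"])

# daemon=True is an addition here: it keeps the server thread from blocking
# interpreter shutdown. The commit starts a plain (non-daemon) thread.
ollama_thread = threading.Thread(target=start_ollama, daemon=True)
ollama_thread.start()

def wait_for_ollama(url="http://127.0.0.1:11434", timeout=60.0):
    # Hypothetical helper: poll the root endpoint until the server responds
    # (Ollama's root route replies once it is serving) or the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with urllib.request.urlopen(url) as resp:
                if resp.status == 200:
                    return True
        except OSError:
            time.sleep(0.5)
    return False

if not wait_for_ollama():
    raise RuntimeError("Ollama server did not come up in time")
```

Polling with the standard library keeps the httpx dependency out of the file, consistent with the commit dropping that import.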
@@ -56,4 +49,5 @@ chat_interface = gr.Interface(fn=generate_chat_response, inputs=chat_input, outp
 
 app = gr.TabbedInterface([single_interface, chat_interface], ["Single Completion", "Chat"])
 
-
+if __name__ == "__main__":
+    app.launch(server_name="0.0.0.0", server_port=7860)
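The added `__main__` guard launches Gradio on `0.0.0.0:7860`, the address and port a Hugging Face Space expects the app to serve on. The diff elides lines 22-48 of the new file, where `get_completion`, `generate_chat_response`, `chat_input`, `single_interface`, and `chat_interface` are defined; only those names are visible in the hunk context. A hypothetical sketch of that elided middle, built only from those names and the llama-index calls they suggest (every function body and widget below is an assumption, not the committed code):

```python
# Hypothetical reconstruction of the elided middle of app.py (new lines
# 22-48): only the names come from the diff's context lines; the bodies
# and widgets are assumptions, not the committed code.
import gradio as gr
from llama_index.llms.ollama import Ollama
from llama_index.core.llms import ChatMessage

llm = Ollama(model="llama3", request_timeout=120.0)  # as defined earlier in app.py

def get_completion(prompt):
    # Single-shot completion through the Ollama-backed LLM.
    return str(llm.complete(prompt))

def generate_chat_response(message):
    # One-turn chat: wrap the user message and return the model's reply text.
    return str(llm.chat([ChatMessage(role="user", content=message)]))

single_input = gr.Textbox(label="Prompt")   # assumed widget
single_interface = gr.Interface(fn=get_completion, inputs=single_input, outputs="text")

chat_input = gr.Textbox(label="Message")    # name taken from the hunk header
chat_interface = gr.Interface(fn=generate_chat_response, inputs=chat_input, outputs="text")

app = gr.TabbedInterface([single_interface, chat_interface], ["Single Completion", "Chat"])
```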
|