Update app.py
app.py CHANGED
@@ -1,4 +1,4 @@
-import
+import streamlit as st
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 # Load the model and tokenizer
@@ -6,10 +6,15 @@ model_name = "Rimyy/MISTRAL-finetuneGSMdata1exp"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
+# Prediction function
 def predict(prompt):
     inputs = tokenizer(prompt, return_tensors="pt")
     outputs = model.generate(inputs.input_ids, max_length=256)
     return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-
-
+# Streamlit user interface
+st.title("Modèle de génération de texte")
+prompt = st.text_area("Entrez votre texte:")
+if st.button("Générer"):
+    result = predict(prompt)
+    st.text_area("Résultat", value=result, height=200)
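The resulting app is launched with the Streamlit CLI (streamlit run app.py). One caveat in the committed code: model.generate receives only input_ids, so recent versions of transformers warn about a missing attention mask and an unset pad token, and max_length=256 caps the prompt and the generation combined. Below is a minimal sketch of a predict variant that addresses both points; it assumes the same tokenizer and model objects as in the commit, and max_new_tokens=256 is an illustrative choice, not a value from the commit.

def predict(prompt: str) -> str:
    # Tokenize the prompt into PyTorch tensors.
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,  # avoids the missing-attention-mask warning
        max_new_tokens=256,                    # bounds new tokens rather than total length (illustrative)
        pad_token_id=tokenizer.eos_token_id,   # Mistral-style tokenizers define no pad token by default
    )
    # Decode the full sequence, dropping special tokens.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)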