Spaces:
Runtime error
Runtime error
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr
import numpy as np
import pandas as pd
from datasets import load_dataset
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Load the Q/A dataset from the Hugging Face Hub.
# (`load_dataset` was previously called without being imported — that was
# the source of the runtime error on startup.)
dataset = load_dataset("Cosmos-AI/Cosmos-dataset")

# Convert dataset to pandas DataFrame.
dataset_df = pd.DataFrame(dataset['train'])  # Assuming 'train' split contains both questions and answers

# Prepare parallel lists of question and answer strings.
questions = dataset_df['Question'].astype(str).tolist()
answers = dataset_df['Answer'].astype(str).tolist()

# Rebuild the tokenizer vocabulary from the same corpus used at training time.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(questions + answers)
word_index = tokenizer.word_index

# Padding length used at inference; previously referenced inside
# generate_response() but never defined anywhere.
# NOTE(review): derived from the corpus here — confirm it matches the
# training-time value baked into conversation_model.h5.
max_sequence_length = max(
    len(seq) for seq in tokenizer.texts_to_sequences(questions + answers)
)

# Load trained model
model = load_model("conversation_model.h5")
# Function to generate a response for a single user message.
def generate_response(input_text):
    """Tokenize *input_text*, run the seq2seq model, and decode the
    predicted per-timestep token distributions into a string.

    Decoding stops at the 'eos' token; a predicted index that is not in
    the tokenizer vocabulary contributes an empty word (the original
    behavior). Returns the stripped, space-joined response.
    """
    # Local import keeps this fix self-contained: `np` was used here
    # without ever being imported at module level.
    import numpy as np

    # Pad to the length the model's input layer expects.
    # NOTE(review): `max_sequence_length` was previously referenced here
    # but never defined; the model's own input shape is the safest
    # stand-alone source for the padding length — confirm against training.
    maxlen = model.input_shape[1]

    # Tokenize input text
    input_sequence = tokenizer.texts_to_sequences([input_text])
    input_sequence = pad_sequences(input_sequence, maxlen=maxlen, padding='post')

    # Generate response
    predicted_sequence = model.predict(input_sequence)

    # Invert word_index once so each timestep is an O(1) lookup instead of
    # a full scan of the vocabulary (the original did a linear search per
    # predicted token).
    index_to_word = {idx: word for word, idx in word_index.items()}

    words = []
    for timestep in predicted_sequence[0]:
        predicted_word_index = int(np.argmax(timestep))
        predicted_word = index_to_word.get(predicted_word_index)
        if predicted_word is None:
            words.append('')  # If predicted index not found in word_index
            continue
        if predicted_word == 'eos':  # 'eos' marks the end of the sequence
            break
        words.append(predicted_word)

    return ' '.join(words).strip()
# Wire the response generator into a simple text-in / text-out web UI.
ui_config = dict(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="Conversation Model",
    description="Enter your message and get a response from the conversational model.",
)
iface = gr.Interface(**ui_config)

# Start the web app.
iface.launch()