import gradio as gr
import numpy as np
import pandas as pd
from datasets import load_dataset
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Load dataset
dataset = load_dataset("Cosmos-AI/Cosmos-dataset")

# Convert dataset to pandas DataFrame
dataset_df = pd.DataFrame(dataset['train'])  # Assuming 'train' split contains both questions and answers

# Prepare data
questions = dataset_df['Question'].astype(str).tolist()
answers = dataset_df['Answer'].astype(str).tolist()

# Rebuild the tokenizer from the training corpus
# (its vocabulary must match the one used when the model was trained)
tokenizer = Tokenizer()
tokenizer.fit_on_texts(questions + answers)
index_word = tokenizer.index_word  # reverse lookup: index -> word

# Maximum sequence length used for padding; this must match the value used during training
max_sequence_length = max(len(seq) for seq in tokenizer.texts_to_sequences(questions + answers))

# Load trained model
model = load_model("conversation_model.h5")

# Function to generate response
def generate_response(input_text):
    # Tokenize input text
    input_sequence = tokenizer.texts_to_sequences([input_text])
    input_sequence = pad_sequences(input_sequence, maxlen=max_sequence_length, padding='post')

    # Generate response
    predicted_sequence = model.predict(input_sequence)

    # Decode predicted sequence: greedily take the most likely word at each timestep
    response = ""
    for timestep in predicted_sequence[0]:
        predicted_word_index = int(np.argmax(timestep))
        predicted_word = index_word.get(predicted_word_index)
        if predicted_word is None:
            continue  # index 0 is reserved for padding and has no word
        if predicted_word == 'eos':  # 'eos' marks the end of the sequence
            break
        response += predicted_word + " "

    return response.strip()

# Define Gradio interface
iface = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="Conversation Model",
    description="Enter your message and get a response from the conversational model."
)
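
# Optional sanity check before launching the UI: run one prompt through the model and
# print the result. This is only a sketch; "Hello" is an arbitrary example input, and the
# usefulness of the reply depends entirely on the trained model and dataset assumed above.
print("Sample response:", generate_response("Hello"))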

# Launch the interface
iface.launch()