import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain_huggingface import HuggingFacePipeline
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.messages import SystemMessage
import nltk
import json
import pandas as pd
# Download nltk stopwords
nltk.download('stopwords')

# Function to load the conversation history
def load_conversation_history(file):
    with open(file, 'r') as f:
        return json.load(f)

# Function to save the conversation history
def save_conversation_history(history, file):
    with open(file, 'w') as f:
        json.dump(history, f)
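# Example (sketch, not in the original code): restore a previously saved
# history at startup; assumes `import os` and that the JSON file exists:
# if os.path.exists('conversation_history.json'):
#     st.session_state.conversation_history = load_conversation_history('conversation_history.json')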
# Initialize conversation history
conversation_history = []
if st.session_state.get('conversation_history'):
    conversation_history = st.session_state.conversation_history

# Title
st.title('Culture AI v.0.1')

# Model selection
model_name = st.selectbox('Choose a model:', [
    'mistralai/Mistral-7B-Instruct-v0.1',
    'meta-llama/Meta-Llama-3-8B',
    'microsoft/Phi-3-mini-4k-instruct',
    'microsoft/phi-1_5',
    'speakleash/Bielik-11B-v2.3-Instruct',
    # Add more models as needed
])
# Upload dataset
dataset_file = st.file_uploader('Upload your dataset (CSV format)', type='csv')
if dataset_file:
    df = pd.read_csv(dataset_file)
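    # Quick preview of the uploaded data (a sketch; the dataset is not used
    # further below in this version of the app):
    st.dataframe(df.head())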
# Initialize tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
model = AutoModelForCausalLM.from_pretrained(model_name)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer,
                device=0 if torch.cuda.is_available() else -1,  # use GPU when available
                return_full_text=False)  # return only the generated reply, not the echoed prompt
llm = HuggingFacePipeline(pipeline=pipe)
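# Note (sketch, not in the original code): reloading a multi-billion-parameter
# model on every Streamlit rerun is slow; wrapping the loading code above in a
# function decorated with st.cache_resource loads it once per model name:
# @st.cache_resource
# def load_llm(name):
#     ...  # build tokenizer, model, and pipeline for `name` as above
#     return HuggingFacePipeline(pipeline=pipe)
# llm = load_llm(model_name)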
# Chat interface
st.write('## Chat')
topic = st.text_input('Enter a topic for the conversation:', 'Machine Learning')
prompt = ChatPromptTemplate.from_messages([
    SystemMessage(content="Write a response related to the input topic in one paragraph"),
    HumanMessagePromptTemplate.from_template("```{topic}```"),
])
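# Example (sketch): prompt.format_messages(topic="Machine Learning") renders
# the system instruction plus the human message "```Machine Learning```".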
chain = prompt | llm
if st.button('Generate Response'):
    # HuggingFacePipeline is a plain-text LLM, so the chain returns a string
    output = chain.invoke({"topic": topic})
    st.write(output)
    conversation_history.append({"user": topic, "assistant": output})
# Save conversation history
if st.button('Save Conversation History'):
    save_conversation_history(conversation_history, 'conversation_history.json')
    st.success('Conversation history saved!')
# Display conversation history
st.write('## Conversation History')
st.write(conversation_history)

# Update session state for conversation history
st.session_state.conversation_history = conversation_history
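# To run the app locally (assuming this file is saved as app.py):
#   streamlit run app.py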