import os
import json

import nltk
import pandas as pd
import streamlit as st
import torch
from langchain_core.messages import SystemMessage
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Download nltk stopwords
nltk.download('stopwords')
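# Note: the stopwords corpus is not used anywhere below; the download is kept
# on the assumption that text preprocessing will be added later.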
# Function to load the conversation history
def load_conversation_history(file):
    with open(file, 'r') as f:
        return json.load(f)
# Function to save the conversation history
def save_conversation_history(history, file):
    with open(file, 'w') as f:
        json.dump(history, f)
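# Usage sketch (assumption; this helper is never called in the original flow):
# a previously saved history could be restored on startup, before falling back
# to session state:
#     if os.path.exists('conversation_history.json'):
#         conversation_history = load_conversation_history('conversation_history.json')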
# Initialize conversation history
conversation_history = []
if st.session_state.get('conversation_history'):
    conversation_history = st.session_state.conversation_history
# Title
st.title('Culture AI v.0.1')
# Get the Hugging Face access token from the environment variable
HF_TOKEN = os.getenv("HF_TOKEN")
# Model selection
model_name = st.selectbox('Choose a model:', [
'meta-llama/Llama-3.2-11B-Vision-Instruct',
'speakleash/Bielik-11B-v2.3-Instruct',
# Add your private model here
])
# Upload dataset
dataset_file = st.file_uploader('Upload your dataset (CSV format)', type='csv')
if dataset_file:
    df = pd.read_csv(dataset_file)
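    # The dataframe is not consumed anywhere else yet; a short preview
    # (assumption: useful feedback for the uploader) confirms the CSV parsed.
    st.write(df.head())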
# Initialize tokenizer and model (cached so Streamlit reruns don't reload the weights)
@st.cache_resource
def load_llm(name):
    tokenizer = AutoTokenizer.from_pretrained(name, use_fast=True, token=HF_TOKEN)
    # Assumption: half precision so an 11B checkpoint fits in memory more easily
    model = AutoModelForCausalLM.from_pretrained(name, torch_dtype=torch.float16, token=HF_TOKEN)
    # return_full_text=False yields only the generated continuation, not the echoed prompt
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
    return HuggingFacePipeline(pipeline=pipe)

llm = load_llm(model_name)
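# Assumption: on a GPU Space, also passing device_map="auto" to from_pretrained
# (requires the accelerate package) would place or shard the weights on the GPU.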
# Chat interface
st.write('## Chat')
topic = st.text_input('Enter a topic for the conversation:', 'Machine Learning')
prompt = ChatPromptTemplate.from_messages([
    SystemMessage(content="Write a response related to the input topic in one paragraph."),
    HumanMessagePromptTemplate.from_template("```{topic}```"),
])
chain = prompt | llm
if st.button('Generate Response'):
    # HuggingFacePipeline is a plain LLM, so invoke() returns a string, not a
    # message object with a .content attribute
    output = chain.invoke({"topic": topic})
    st.write(output)
    conversation_history.append({"user": topic, "assistant": output})
# Save conversation history
if st.button('Save Conversation History'):
    save_conversation_history(conversation_history, 'conversation_history.json')
    st.success('Conversation history saved!')
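# Alternative sketch (assumption): offering the JSON as a browser download
# survives Space restarts better than writing to local disk:
#     st.download_button('Download history', json.dumps(conversation_history),
#                        file_name='conversation_history.json')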
# Display conversation history
st.write('## Conversation History')
st.write(conversation_history)
# Update session state for conversation history
st.session_state.conversation_history = conversation_history