import vertexai | |
import http.client | |
import typing | |
import urllib.request | |
from vertexai.preview.generative_models import GenerativeModel ,ChatSession | |
import streamlit as st | |
from io import StringIO | |
import pandas as pd | |
from streamlit_extras.let_it_rain import rain | |
import time | |
import nltk | |
from textblob import TextBlob | |
# --- Page chrome: custom icon/title, full-width layout ---
st.set_page_config (page_icon="icon.jpg",page_title="Content Moderation",layout="wide")
# Page heading rendered as raw HTML (hence unsafe_allow_html).
st.markdown("<h1 style='color: var(--black-100, var(--black-100, #1C1C1C));text-align: center;font-feature-settings: 'cv11' on, 'cv01' on, 'ss01' on;font-family: Poppins;font-size: 48px;font-style: normal;font-weight: 600;line-height: 58px;'>Content Moderation</h1>",
unsafe_allow_html=True)
# Vertex AI settings.
# NOTE(review): project_id and loc are defined but never passed to
# vertexai.init() anywhere visible — presumably initialization happens via
# environment defaults; confirm before relying on these values.
project_id = "agileai-poc"
loc = "us-central1"
model= GenerativeModel("gemini-pro")
# prompt="""understand the content provided and if any spaces found in content ignore and generate the output only in the given format
# format: 1.Tone:find the tone of content
# 2.Negative sentences :"Only find the negative sentences based on semantic analysis" to ensure accuracy in detecting negative words in pointwise and "must highlight the negative words in bold" in same sentence
# if the content is in positive tone give output as "No changes required"
# """
# prompt2=""" understand the content provided and generate the output only in the given format
# format: 1.Tone :provide the generated content tone
# 2.Content:"Don't explain the content" just modify the same content by only "Replacing the negative words by converting the tone into formal",if the content doesn't have any negative words give output as "No changes required" """
# Active prompt: instructs Gemini to answer ONLY in the form "Tone: positive"
# or "Tone: negative" (consumed by offensive_text below).
prompt="""find the tone of content state whether it is positive or negative and generate the output only in the given format
format: Tone:just specify the tone is positive or negative """
# if input_text:
## Define layout and containers
# Fixed pixel height shared by both dashboard panels.
HEIGHT = 1000
# Two side-by-side panels: cols[0] = fans dashboard, cols[1] = celeb dashboard.
cols = st.columns(2)
with cols[0]:
    left_panel = st.container(height=HEIGHT + 15, border=True)
with cols[1]:
    right_panel=st.container(height=HEIGHT +15,border=True)
## Add contents | |
def offensive_text(input):
    """Classify *input* as positive/negative via Gemini and render the result.

    Sends the module-level tone-detection ``prompt`` together with the user
    text to the Gemini model, then writes feedback to the page. Positive
    comments are echoed into the celeb ``right_panel``.

    Args:
        input: The user-supplied comment text. Falsy values are ignored.
            NOTE(review): the parameter name shadows the ``input`` builtin but
            is kept unchanged for backward compatibility with callers.

    Returns:
        The detected tone label (``"negative"`` / ``"positive"``), the
        fallback message when the model's answer mentions neither tone, or
        ``None`` when *input* is empty.
    """
    check = None
    if input:
        response = model.generate_content([prompt, input])
        # BUG FIX: the model replies in the form "Tone: positive", so the
        # old exact comparisons (tone == "negative" / "positive") could
        # never match and every call fell through to the else branch.
        # Normalize and use a substring test instead.
        tone = response.text.strip().lower()
        if "negative" in tone:
            st.write(response.text)
            st.write("negative")
            check = "negative"
        elif "positive" in tone:
            with right_panel:
                st.write("positive")
            check = "positive"
        else:
            # st.write returns None, so capture the message itself instead
            # of the old `check = st.write(...)` pattern.
            check = "Your content needs to be modified ..!!"
            st.write(check)
    return check
# with left_panel: | |
# st.markdown("<h5 style='font-style:bold,color:blue'>Fans Dashboard</h5>",unsafe_allow_html=True) | |
# input=st.chat_input("Type your comment") | |
# offensive_text(input) | |
# model = GenerativeModel("gemini-pro") | |
# NOTE(review): this ChatSession is created but never used by any active code
# below (only by commented-out helpers) — candidate for removal; confirm no
# other module imports `chat` from this file first.
chat = model.start_chat(history=[])
# def get_chat_response(chat: ChatSession, prompt: str) -> str: | |
# response = chat.send_message(prompt) | |
# return response.text | |
# Function to determine the tone (positive/negative) of the input | |
def determine_tone(input_text):
    """Classify *input_text* as "Positive", "Negative", or "Neutral".

    Runs TextBlob sentiment analysis and maps the polarity score:
    > 0 -> "Positive", < 0 -> "Negative", exactly 0 -> "Neutral".

    Args:
        input_text: The text to analyze.

    Returns:
        One of the three tone labels as a string.
    """
    polarity = TextBlob(input_text).sentiment.polarity
    if polarity == 0:
        return "Neutral"
    return "Positive" if polarity > 0 else "Negative"
# Persist chat history across Streamlit reruns.
# NOTE(review): chat_history is initialized here but never appended to in the
# active code — presumably a leftover from the commented-out chat flow.
if 'chat_history' not in st.session_state:
    st.session_state['chat_history'] = []

# Panel headings.
with left_panel:
    st.markdown("<h5 style='font-style:bold,color:blue'>Fans Dashboard</h5>",unsafe_allow_html=True)
with right_panel:
    st.markdown("<h5 style='font-style:bold,color:blue'>Celeb Dashboard</h5>",unsafe_allow_html=True)

input_text = st.chat_input("type your comment")
if input_text:
    # Classify ONCE and reuse for both panels (the old code re-ran the
    # sentiment analysis a second time for the right panel, and also
    # re-checked `if input_text:` redundantly inside this branch).
    tone = determine_tone(input_text)
    with left_panel:
        # Fans dashboard shows every comment, flagged by tone.
        if tone == "Negative":
            st.write(":x:", input_text)
        elif tone == "Positive":
            st.write(":heavy_check_mark:", input_text)
        else:
            # Fixed user-facing typo ("your content need to modifed").
            st.write("Your content needs to be modified.")
    with right_panel:
        # Celeb dashboard only receives moderated (positive) comments.
        if tone == "Positive":
            st.write(":heavy_check_mark:", input_text)
# print("The tone of the input is:", tone) | |
#print("the tone is not required :") | |
# prompt = st.chat_input("Type your comment") | |
# # if prompt: | |
# # # st.write(get_chat_response(chat, prompt)) | |
# st.write(offensive_text(prompt)) | |
# prompt = "What are all the colors in a rainbow?" | |
# print(get_chat_response(chat, prompt)) | |
# prompt = "Why does it appear when it rains?" | |
# print(get_chat_response(chat, prompt)) | |
# # try: | |
# with upper_right_panel: | |
# st.markdown("<h6 style='color:red;font-style:bold'>Analyzed Content:</h6>",unsafe_allow_html=True) | |
# st.write(response.text) | |
# with lower_right_panel: | |
# st.markdown("<h6 style='color:green;font-style:bold'>Rephrased content:</h6>",unsafe_allow_html=True) | |
# reference=model.generate_content([prompt2,response.text]) | |
# st.write(reference.text) | |
# except: | |
# st.error("Check the input text ,avoid empty spaces if any.") | |
# st.stop() | |
# if "messages" not in st.session_state: | |
# st.session_state.messages = [] | |
# for message in st.session_state.messages: | |
# with st.chat_message(message["role"]): | |
# st.markdown(message["content"]) | |
# queries = st.chat_input("Ask your queries here!") | |
# if prompt := queries: | |
# # Display user message in chat message container | |
# with st.chat_message("user"): | |
# st.markdown(prompt) | |
# # Add user message to chat history | |
# st.session_state.messages.append({"role": "user", "content": prompt}) | |
# ask_url = "http://127.0.0.1:5000/predict" | |
# question_data = {"questions": prompt} | |
# with st.spinner("..."): | |
# response = requests.post( | |
# url=ask_url, | |
# json=question_data, | |
# ) | |
# # response=prompt | |
# answer = response.json().get("predicted_answer") | |
# with st.chat_message("Assistant"): | |
# print("Ans : ", answer) | |
# st.write(answer) | |
# st.session_state.messages.append({"role": "Assistant", "content": answer}) |