import streamlit as st
import sparknlp

from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.ml import Pipeline

# Page configuration
st.set_page_config(
    layout="wide",
    initial_sidebar_state="auto"
)

# CSS for styling
st.markdown("""
""", unsafe_allow_html=True)

@st.cache_resource
def init_spark():
    # Start (or reuse) the Spark NLP session; cached across Streamlit reruns
    return sparknlp.start()

@st.cache_resource
def create_pipeline(model):
    # Raw text -> document annotation
    document_assembler = DocumentAssembler() \
        .setInputCol("text") \
        .setOutputCol("documents")

    # Split the input into sentences, treated here as individual questions
    sentence_detector = SentenceDetectorDLModel \
        .pretrained() \
        .setInputCols(["documents"]) \
        .setOutputCol("questions")

    # Closed-book QA: T5 answers from its own parameters, without a context passage
    t5 = T5Transformer() \
        .pretrained("google_t5_small_ssm_nq") \
        .setInputCols(["questions"]) \
        .setOutputCol("answers")

    pipeline = Pipeline().setStages([document_assembler, sentence_detector, t5])
    return pipeline

def fit_data(pipeline, data):
    df = spark.createDataFrame([[data]]).toDF("text")
    result = pipeline.fit(df).transform(df)
    return result.select('answers.result').collect()

# Sidebar content
model = st.sidebar.selectbox(
    "Choose the pretrained model",
    ['google_t5_small_ssm_nq'],
    help="For more info about the models visit: https://sparknlp.org/models"
)

# Set up the page layout
title, sub_title = (
    'Automatically Answer Questions (CLOSED BOOK)',
    'Automatically generate answers to questions without context.'
)
st.markdown(f'<div class="main-title">{title}</div>', unsafe_allow_html=True)
st.write(sub_title)

# Reference notebook link in sidebar
link = """Open In Colab"""
st.sidebar.markdown('Reference notebook:')
st.sidebar.markdown(link, unsafe_allow_html=True)

# Load examples
examples = [
    "Who is Clark Kent?",
    "Which is the capital of Bulgaria?",
    "Which country tops the annual global democracy index compiled by the Economist Intelligence Unit?",
    "In which city is the Eiffel Tower located?",
    "Who is the founder of Microsoft?"
]

selected_text = st.selectbox("Select an example", examples)
custom_input = st.text_input("Try it with your own Sentence!")

text_to_analyze = custom_input if custom_input else selected_text

st.write('Question:')
HTML_WRAPPER = """<div class="scroll entities" style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem; white-space: pre-wrap">{}</div>"""
st.markdown(HTML_WRAPPER.format(text_to_analyze), unsafe_allow_html=True)

# Initialize Spark and create the pipeline, then run it on the question
spark = init_spark()
pipeline = create_pipeline(model)
output = fit_data(pipeline, text_to_analyze)

# Display the generated answer
st.write("Answer:")
output_text = "".join(output[0][0])
st.markdown(HTML_WRAPPER.format(output_text.title()), unsafe_allow_html=True)