abdullahmubeen10 committed
Commit: 5da5ab8
Parent(s): 077a97e
Upload 5 files
- .streamlit/config.toml +3 -0
- Demo.py +116 -0
- Dockerfile +72 -0
- pages/Workflow & Model Overview.py +171 -0
- requirements.txt +7 -0
.streamlit/config.toml
ADDED
@@ -0,0 +1,3 @@
+[theme]
+base="light"
+primaryColor="#29B4E8"
Demo.py
ADDED
@@ -0,0 +1,116 @@
+import streamlit as st
+import sparknlp
+
+from sparknlp.base import *
+from sparknlp.annotator import *
+from pyspark.ml import Pipeline
+
+# Page configuration
+st.set_page_config(
+    layout="wide",
+    initial_sidebar_state="auto"
+)
+
+# CSS for styling
+st.markdown("""
+    <style>
+        .main-title {
+            font-size: 36px;
+            color: #4A90E2;
+            font-weight: bold;
+            text-align: center;
+        }
+        .section {
+            background-color: #f9f9f9;
+            padding: 10px;
+            border-radius: 10px;
+            margin-top: 10px;
+        }
+        .section p, .section ul {
+            color: #666666;
+        }
+    </style>
+""", unsafe_allow_html=True)
+
+@st.cache_resource
+def init_spark():
+    return sparknlp.start()
+
+@st.cache_resource
+def create_pipeline(model):
+    document_assembler = DocumentAssembler() \
+        .setInputCol("text") \
+        .setOutputCol("documents")
+
+    sentence_detector = SentenceDetectorDLModel \
+        .pretrained() \
+        .setInputCols(["documents"]) \
+        .setOutputCol("questions")
+
+    t5 = T5Transformer() \
+        .pretrained("google_t5_small_ssm_nq") \
+        .setInputCols(["questions"]) \
+        .setOutputCol("answers")
+
+    pipeline = Pipeline().setStages([document_assembler, sentence_detector, t5])
+    return pipeline
+
+def fit_data(pipeline, data):
+    df = spark.createDataFrame([[data]]).toDF("text")
+    result = pipeline.fit(df).transform(df)
+    return result.select('answers.result').collect()
+
+# Sidebar content
+model = st.sidebar.selectbox(
+    "Choose the pretrained model",
+    ['google_t5_small_ssm_nq'],
+    help="For more info about the models visit: https://sparknlp.org/models"
+)
+
+# Set up the page layout
+title, sub_title = (
+    'Automatically Answer Questions (CLOSED BOOK)',
+    'Automatically generate answers to questions without context.'
+)
+
+st.markdown(f'<div class="main-title">{title}</div>', unsafe_allow_html=True)
+st.write(sub_title)
+
+# Reference notebook link in sidebar
+link = """
+<a href="https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/public/QUESTION_ANSWERING_CLOSED_BOOK.ipynb#scrollTo=LEW2ZjZj7T1Q">
+    <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
+</a>
+"""
+st.sidebar.markdown('Reference notebook:')
+st.sidebar.markdown(link, unsafe_allow_html=True)
+
+# Load examples
+examples = [
+    "Who is Clark Kent?",
+    "Which is the capital of Bulgaria?",
+    "Which country tops the annual global democracy index compiled by the Economist Intelligence Unit?",
+    "In which city is the Eiffel Tower located?",
+    "Who is the founder of Microsoft?"
+]
+
+selected_text = st.selectbox("Select an example", examples)
+custom_input = st.text_input("Try it with your own sentence!")
+
+text_to_analyze = custom_input if custom_input else selected_text
+
+st.write('Question:')
+HTML_WRAPPER = """<div class="scroll entities" style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem; white-space:pre-wrap">{}</div>"""
+st.markdown(HTML_WRAPPER.format(text_to_analyze), unsafe_allow_html=True)
+
+# Initialize Spark and create pipeline
+spark = init_spark()
+pipeline = create_pipeline(model)
+output = fit_data(pipeline, text_to_analyze)
+
+# Display the generated answer
+st.write("Answer:")
+
+output_text = "".join(output[0][0])
+st.markdown(HTML_WRAPPER.format(output_text.title()), unsafe_allow_html=True)
+
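A side note on serving: fit_data() builds a one-row Spark DataFrame and re-fits the pipeline for every question. For single-string inference, Spark NLP's LightPipeline is a common lighter-weight alternative. Below is a minimal sketch, not part of this commit, assuming Demo.py's spark session, model selection, and create_pipeline() are in scope; light_pipeline and answer_question are illustrative names.

from sparknlp.base import LightPipeline

# Fit once on an empty DataFrame (every stage is a pretrained model),
# then wrap the fitted model so plain strings can be annotated directly.
empty_df = spark.createDataFrame([[""]]).toDF("text")
light_pipeline = LightPipeline(create_pipeline(model).fit(empty_df))

def answer_question(question: str) -> str:
    # annotate() returns a dict keyed by output column, e.g. {"answers": ["..."]}
    return "".join(light_pipeline.annotate(question)["answers"])

Calling answer_question(text_to_analyze) could then replace the fit_data() call for repeated queries, avoiding a new DataFrame per request.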
Dockerfile
ADDED
@@ -0,0 +1,72 @@
+# Download base image ubuntu 18.04
+FROM ubuntu:18.04
+
+# Set environment variables
+ENV NB_USER jovyan
+ENV NB_UID 1000
+ENV HOME /home/${NB_USER}
+ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64/
+
+# Install required packages
+RUN apt-get update && apt-get install -y \
+    tar \
+    wget \
+    bash \
+    rsync \
+    gcc \
+    libfreetype6-dev \
+    libhdf5-serial-dev \
+    libpng-dev \
+    libzmq3-dev \
+    python3 \
+    python3-dev \
+    python3-pip \
+    unzip \
+    pkg-config \
+    software-properties-common \
+    graphviz \
+    openjdk-8-jdk \
+    ant \
+    ca-certificates-java \
+    && apt-get clean \
+    && update-ca-certificates -f
+
+# Install Python 3.8 and pip
+RUN add-apt-repository ppa:deadsnakes/ppa \
+    && apt-get update \
+    && apt-get install -y python3.8 python3-pip \
+    && apt-get clean
+
+# Set up JAVA_HOME
+RUN echo "export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/" >> /etc/profile \
+    && echo "export PATH=\$JAVA_HOME/bin:\$PATH" >> /etc/profile
+# Create a new user named "jovyan" with user ID 1000
+RUN useradd -m -u ${NB_UID} ${NB_USER}
+
+# Switch to the "jovyan" user
+USER ${NB_USER}
+
+# Set home and path variables for the user
+ENV HOME=/home/${NB_USER} \
+    PATH=/home/${NB_USER}/.local/bin:$PATH
+
+# Set up PySpark to use Python 3.8 for both driver and workers
+ENV PYSPARK_PYTHON=/usr/bin/python3.8
+ENV PYSPARK_DRIVER_PYTHON=/usr/bin/python3.8
+
+# Set the working directory to the user's home directory
+WORKDIR ${HOME}
+
+# Upgrade pip and install Python dependencies
+RUN python3.8 -m pip install --upgrade pip
+COPY requirements.txt /tmp/requirements.txt
+RUN python3.8 -m pip install -r /tmp/requirements.txt
+
+# Copy the application code into the container at /home/jovyan
+COPY --chown=${NB_USER}:${NB_USER} . ${HOME}
+
+# Expose port for Streamlit
+EXPOSE 7860
+
+# Define the entry point for the container
+ENTRYPOINT ["streamlit", "run", "Demo.py", "--server.port=7860", "--server.address=0.0.0.0"]
pages/Workflow & Model Overview.py
ADDED
@@ -0,0 +1,171 @@
+import streamlit as st
+
+# Page configuration
+st.set_page_config(
+    layout="wide",
+    initial_sidebar_state="auto"
+)
+
+# Custom CSS for better styling
+st.markdown("""
+    <style>
+        .main-title {
+            font-size: 36px;
+            color: #4A90E2;
+            font-weight: bold;
+            text-align: center;
+        }
+        .sub-title {
+            font-size: 24px;
+            color: #4A90E2;
+            margin-top: 20px;
+        }
+        .section {
+            background-color: #f9f9f9;
+            padding: 15px;
+            border-radius: 10px;
+            margin-top: 20px;
+        }
+        .section h2 {
+            font-size: 22px;
+            color: #4A90E2;
+        }
+        .section p, .section ul {
+            color: #666666;
+        }
+        .link {
+            color: #4A90E2;
+            text-decoration: none;
+        }
+    </style>
+""", unsafe_allow_html=True)
+
+# Title
+st.markdown('<div class="main-title">Automatically Answer Questions (CLOSED BOOK)</div>', unsafe_allow_html=True)
+
+# Introduction Section
+st.markdown("""
+<div class="section">
+    <p>Closed-book question answering is a challenging task in which a model is expected to generate accurate answers to questions without access to external information or documents during inference. The approach relies solely on the knowledge embedded in the model during pre-training, making it suitable for scenarios where retrieval-based methods are not feasible.</p>
+    <p>On this page, we explore how to implement a pipeline that automatically answers questions in a closed-book setting using state-of-the-art NLP techniques. We use a T5 Transformer model fine-tuned for closed-book question answering, which produces accurate and contextually relevant answers to a variety of trivia questions.</p>
+</div>
+""", unsafe_allow_html=True)
+
+# T5 Transformer Overview
+st.markdown('<div class="sub-title">Understanding the T5 Transformer for Closed-Book QA</div>', unsafe_allow_html=True)
+
+st.markdown("""
+<div class="section">
+    <p>The T5 (Text-To-Text Transfer Transformer) model by Google is a versatile transformer-based model designed to handle a wide range of NLP tasks in a unified text-to-text format. For closed-book question answering, T5 is fine-tuned to generate answers directly from its internal knowledge without relying on external sources.</p>
+    <p>The model processes input questions and, based on its training, generates a text response that is both relevant and accurate. This makes it particularly effective in applications where access to external data sources is limited or impractical.</p>
+</div>
+""", unsafe_allow_html=True)
+
+# Performance Section
+st.markdown('<div class="sub-title">Performance and Benchmarks</div>', unsafe_allow_html=True)
+
+st.markdown("""
+<div class="section">
+    <p>The T5 model has been extensively benchmarked on question-answering datasets, including Natural Questions and trivia challenges. In these evaluations, the closed-book variant of T5 has shown strong performance, often producing answers that are correct and contextually appropriate even though the model is not allowed to reference any external data.</p>
+    <p>This makes the T5 model a powerful tool for generating answers in applications such as virtual assistants, educational tools, and any scenario where pre-trained knowledge is sufficient to provide responses.</p>
+</div>
+""", unsafe_allow_html=True)
+
+# Implementation Section
+st.markdown('<div class="sub-title">Implementing Closed-Book Question Answering</div>', unsafe_allow_html=True)
+
+st.markdown("""
+<div class="section">
+    <p>The following example demonstrates how to implement a closed-book question answering pipeline using Spark NLP. The pipeline includes a document assembler, a sentence detector to identify questions, and the T5 model to generate answers.</p>
+</div>
+""", unsafe_allow_html=True)
+
+st.code('''
+from sparknlp.base import *
+from sparknlp.annotator import *
+from pyspark.ml import Pipeline
+from pyspark.sql.functions import col, expr
+
+document_assembler = DocumentAssembler()\\
+    .setInputCol("text")\\
+    .setOutputCol("documents")
+
+sentence_detector = SentenceDetectorDLModel\\
+    .pretrained("sentence_detector_dl", "en")\\
+    .setInputCols(["documents"])\\
+    .setOutputCol("questions")
+
+t5 = T5Transformer()\\
+    .pretrained("google_t5_small_ssm_nq")\\
+    .setTask('trivia question:')\\
+    .setInputCols(["questions"])\\
+    .setOutputCol("answers")
+
+pipeline = Pipeline().setStages([document_assembler, sentence_detector, t5])
+
+data = spark.createDataFrame([["What is the capital of France?"]]).toDF("text")
+result = pipeline.fit(data).transform(data)
+result.select("answers.result").show(truncate=False)
+''', language='python')
+
+# Example Output
+st.text("""
++---------------------------+
+|answers.result             |
++---------------------------+
+|[Paris]                    |
++---------------------------+
+""")
+
+# Model Info Section
+st.markdown('<div class="sub-title">Choosing the Right T5 Model</div>', unsafe_allow_html=True)
+
+st.markdown("""
+<div class="section">
+    <p>Several T5 models are available, each pre-trained on different datasets and tasks. For closed-book question answering, it is important to select a model that has been fine-tuned specifically for this task. The model used in the example, "google_t5_small_ssm_nq", is optimized for answering trivia questions in a closed-book setting.</p>
+    <p>For more complex or varied question-answering tasks, consider larger T5 models such as T5-Base or T5-Large, which may offer improved accuracy and context comprehension. Explore the available models on the <a class="link" href="https://sparknlp.org/models?annotator=T5Transformer" target="_blank">Spark NLP Models Hub</a> to find the best fit for your application.</p>
+</div>
+""", unsafe_allow_html=True)
+
+# Footer
+# References Section
+st.markdown('<div class="sub-title">References</div>', unsafe_allow_html=True)
+
+st.markdown("""
+<div class="section">
+    <ul>
+        <li><a class="link" href="https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html" target="_blank">Google AI Blog</a>: Exploring Transfer Learning with T5</li>
+        <li><a class="link" href="https://sparknlp.org/models?annotator=T5Transformer" target="_blank">Spark NLP Model Hub</a>: Explore T5 models</li>
+        <li>Model used: <a class="link" href="https://sparknlp.org/2022/05/31/google_t5_small_ssm_nq_en_3_0.html" target="_blank">google_t5_small_ssm_nq</a></li>
+        <li><a class="link" href="https://github.com/google-research/text-to-text-transfer-transformer" target="_blank">GitHub</a>: T5 Transformer repository</li>
+        <li><a class="link" href="https://arxiv.org/abs/1910.10683" target="_blank">T5 Paper</a>: Detailed insights from the developers</li>
+    </ul>
+</div>
+""", unsafe_allow_html=True)
+
+st.markdown('<div class="sub-title">Community & Support</div>', unsafe_allow_html=True)
+
+st.markdown("""
+<div class="section">
+    <ul>
+        <li><a class="link" href="https://sparknlp.org/" target="_blank">Official Website</a>: Documentation and examples</li>
+        <li><a class="link" href="https://join.slack.com/t/spark-nlp/shared_invite/zt-198dipu77-L3UWNe_AJ8xqDk0ivmih5Q" target="_blank">Slack</a>: Live discussion with the community and team</li>
+        <li><a class="link" href="https://github.com/JohnSnowLabs/spark-nlp" target="_blank">GitHub</a>: Bug reports, feature requests, and contributions</li>
+        <li><a class="link" href="https://medium.com/spark-nlp" target="_blank">Medium</a>: Spark NLP articles</li>
+        <li><a class="link" href="https://www.youtube.com/channel/UCmFOjlpYEhxf_wJUDuz6xxQ/videos" target="_blank">YouTube</a>: Video tutorials</li>
+    </ul>
+</div>
+""", unsafe_allow_html=True)
+
+st.markdown('<div class="sub-title">Quick Links</div>', unsafe_allow_html=True)
+
+st.markdown("""
+<div class="section">
+    <ul>
+        <li><a class="link" href="https://sparknlp.org/docs/en/quickstart" target="_blank">Getting Started</a></li>
+        <li><a class="link" href="https://nlp.johnsnowlabs.com/models" target="_blank">Pretrained Models</a></li>
+        <li><a class="link" href="https://github.com/JohnSnowLabs/spark-nlp/tree/master/examples/python/annotation/text/english" target="_blank">Example Notebooks</a></li>
+        <li><a class="link" href="https://sparknlp.org/docs/en/install" target="_blank">Installation Guide</a></li>
+    </ul>
+</div>
+""", unsafe_allow_html=True)
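The example in this page runs a single question through the pipeline; because SentenceDetectorDLModel splits each document into individual questions, the same pipeline also handles batches, producing one answer per detected question. Below is a minimal sketch, not part of the commit, assuming the spark session and pipeline from the code block above; questions, batch_df, and batch_result are illustrative names.

# Several documents at once: one row per document, one generated answer
# per question detected inside each document.
questions = [
    ["Which is the capital of Bulgaria?"],
    ["Who is the founder of Microsoft?"],
]
batch_df = spark.createDataFrame(questions).toDF("text")
batch_result = pipeline.fit(batch_df).transform(batch_df)

# Flatten the array of generated answers into one row per answer.
batch_result.selectExpr("explode(answers.result) as answer").show(truncate=False)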
requirements.txt
ADDED
@@ -0,0 +1,7 @@
+streamlit
+st-annotated-text
+streamlit-tags
+pandas
+numpy
+spark-nlp
+pyspark