Spaces:
Runtime error
Runtime error
Duplicate from ThankGod/text-classification
Browse files
Co-authored-by: Egbe <ThankGod@users.noreply.huggingface.co>
- .github/workflows/main.yml +19 -0
- Makefile +27 -0
- README.md +22 -0
- app.py +67 -0
- file +1 -0
- requirements.txt +5 -0
- text-classification +1 -0
.github/workflows/main.yml
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: Sync to Hugging Face hub
|
2 |
+
on:
|
3 |
+
push:
|
4 |
+
branches: [main]
|
5 |
+
|
6 |
+
# to run this workflow manually from the Actions tab
|
7 |
+
workflow_dispatch:
|
8 |
+
|
9 |
+
jobs:
|
10 |
+
sync-to-hub:
|
11 |
+
runs-on: ubuntu-latest
|
12 |
+
steps:
|
13 |
+
- uses: actions/checkout@v2
|
14 |
+
with:
|
15 |
+
fetch-depth: 0
|
16 |
+
- name: Push to hub
|
17 |
+
env:
|
18 |
+
HF_TOKEN: ${{ secrets.HF_TOKEN }}
|
19 |
+
run: git push --force https://ThankGod:$HF_TOKEN@huggingface.co/spaces/ThankGod/text-classification main
|
Makefile
ADDED
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
install:
|
2 |
+
pip install --upgrade pip &&\
|
3 |
+
pip install -r requirements.txt
|
4 |
+
|
5 |
+
test:
|
6 |
+
python -m pytest -vvv --cov=hello --cov=greeting \
|
7 |
+
--cov=smath --cov=web tests
|
8 |
+
python -m pytest --nbval notebook.ipynb #tests our jupyter notebook
|
9 |
+
#python -m pytest -v tests/test_web.py #if you just want to test web
|
10 |
+
|
11 |
+
debug:
|
12 |
+
python -m pytest -vv --pdb #Debugger is invoked
|
13 |
+
|
14 |
+
one-test:
|
15 |
+
python -m pytest -vv tests/test_greeting.py::test_my_name4
|
16 |
+
|
17 |
+
debugthree:
|
18 |
+
#not working the way I expect
|
19 |
+
python -m pytest -vv --pdb --maxfail=4 # drop to PDB for first three failures
|
20 |
+
|
21 |
+
format:
|
22 |
+
black *.py
|
23 |
+
|
24 |
+
lint:
|
25 |
+
pylint --disable=R,C *.py
|
26 |
+
|
27 |
+
all: install lint test format
|
README.md
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: Text Classification/ Sentiment analysis
|
3 |
+
emoji: 📸
|
4 |
+
colorFrom: yellow
|
5 |
+
colorTo: pink
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 3.1.7
|
8 |
+
app_file: app.py
|
9 |
+
pinned: false
|
10 |
+
duplicated_from: ThankGod/text-classification
|
11 |
+
---
|
12 |
+
|
13 |
+
[Try Demo Text classification Here](https://huggingface.co/spaces/ThankGod/text-classification)
|
14 |
+
|
15 |
+
## Credits
|
16 |
+
- Hugging Face 🤗 for hosting this demo.
|
17 |
+
- Hugging Face transformer model for text classification
|
18 |
+
- Gradio for the beautiful visualization dashboards.
|
19 |
+
|
20 |
+
## References
|
21 |
+
- https://gradio.app/
|
22 |
+
- https://huggingface.co/
|
app.py
ADDED
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from transformers import AutoModelForSequenceClassification
|
2 |
+
from transformers import AutoTokenizer, AutoConfig
|
3 |
+
import numpy as np
|
4 |
+
from scipy.special import softmax
|
5 |
+
import gradio as gr
|
6 |
+
|
7 |
+
# Preprocess text (username and link placeholders)
def preprocess(text):
    """Anonymize a tweet-like string for the sentiment model.

    Every whitespace-separated token that is an @-mention becomes the
    placeholder '@user', and every token that looks like a URL becomes
    the placeholder 'http'; all other tokens pass through unchanged.
    """
    cleaned = []
    for token in text.split(" "):
        if token.startswith('@') and len(token) > 1:
            # A real mention ('@' alone is kept as-is).
            cleaned.append('@user')
        elif token.startswith('http'):
            cleaned.append('http')
        else:
            cleaned.append(token)
    return " ".join(cleaned)
|
15 |
+
|
16 |
+
# Load the pretrained RoBERTa model fine-tuned for tweet sentiment
# (labels: negative / neutral / positive).
# NOTE: was `f"..."` — an f-string with no placeholders (flake8 F541);
# a plain string literal is equivalent.
MODEL = "cardiffnlp/twitter-roberta-base-sentiment-latest"
model = AutoModelForSequenceClassification.from_pretrained(MODEL)

# Tokenizer and config are loaded from the same checkpoint so that
# `config.id2label` matches the model's output head.
tokenizer = AutoTokenizer.from_pretrained(MODEL)
config = AutoConfig.from_pretrained(MODEL)
|
24 |
+
|
25 |
+
# create classifier function
def classify_sentiments(text):
    """Run sentiment classification on *text*.

    Returns a dict mapping each sentiment label to its probability,
    rounded to 4 decimal places and ordered from most to least likely
    (a shape Gradio's 'label' output component accepts directly).
    """
    text = preprocess(text)
    encoded_input = tokenizer(text, return_tensors='pt')
    output = model(**encoded_input)
    # Logits of the single (batch-size 1) input as a plain numpy vector,
    # converted to probabilities.
    scores = softmax(output[0][0].detach().numpy())

    # Build label -> probability, highest score first.
    # (Was an index loop with the ambiguous name `l`; `np.round` also
    # produced numpy scalars — native floats are safer to serialize.)
    probs = {}
    for idx in np.argsort(scores)[::-1]:
        label = config.id2label[int(idx)]
        probs[label] = round(float(scores[idx]), 4)
    return probs
|
43 |
+
|
44 |
+
|
45 |
+
# Build the Gradio app.
# Instructuction = "Write an imaginary review about a product or service you might be interested in."
title = "Text Sentiment Analysis"
description = """Write a Good or Bad review about an imaginary product or service,\
see how the machine learning model is able to predict your sentiments"""
article = """
- Click submit button to test sentiment analysis prediction
- Click clear button to refresh text
"""

# Wire the classifier into a simple text-in / label-out interface and serve it.
demo = gr.Interface(
    classify_sentiments,
    'text',
    'label',
    title=title,
    description=description,
    # Instruction = Instructuction,
    article=article,
    allow_flagging="never",
    live=False,
    examples=[
        "This has to be the best Introductory course in machine learning",
        "I consider this training an absolute waste of time.",
    ],
)
demo.launch()
|
67 |
+
|
file
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
more lines
|
requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
scipy
|
2 |
+
gradio
|
3 |
+
numpy
|
4 |
+
transformers
|
5 |
+
torch
|
text-classification
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
Subproject commit e187cc1fff0bfec15df56f125f944607154499a9
|