Hanna Hjelmeland committed
Commit 480db19 • 1 Parent(s): 1bbd375
Update app with second model
app.py CHANGED
@@ -1,6 +1,5 @@
 __all__ = ['is_flower', 'learn', 'classify_image', 'categories', 'image', 'label', 'examples', 'intf']
 
-from fastai.vision.all import *
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TrainingArguments
 import torch
@@ -9,11 +8,21 @@ import torch
 model_name = "NbAiLab/nb-bert-base"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
-
-
+first_model_path = "models/first_model"
+first_model = AutoModelForSequenceClassification.from_pretrained(first_model_path)
 
+second_model_path = "models/second_model"
+second_model = AutoModelForSequenceClassification.from_pretrained(second_model_path)
 
-def classify_text(test_text):
+def classify_text(test_text, selected_model):
+
+    if selected_model == 'Model 1':
+        model = first_model
+    elif selected_model == 'Model 2':
+        model = second_model
+    else:
+        raise ValueError("Invalid model selection")
+
     inputs = tokenizer(test_text, return_tensors="pt")
 
     with torch.no_grad():
@@ -28,9 +37,9 @@ def classify_text(test_text):
 
     categories = ['Kvinner 30-40', 'Kvinner 40-55', 'Menn 30-40', 'Menn 40-55']
 
-    category_probabilities = list(zip(categories, probabilities))
+    #category_probabilities = list(zip(categories, probabilities))
 
-    max_category = max(category_probabilities, key=lambda x: x[1])
+    #max_category = max(category_probabilities, key=lambda x: x[1])
 
     #print('The model predicts that this text lead would have a majority of readers in the target group', max_category[0])
 
@@ -42,8 +51,8 @@ categories = ('Kvinner 30-40', 'Kvinner 40-55', 'Menn 30-40', 'Menn 40-55')
 app_title = "Target group classifier"
 
 examples = ["Moren leter etter sønnen i et ihjelbombet leilighetskompleks.",
-            "De første månedene av krigen gikk så som så. Nå har Putin skiftet strategi."
+            "De første månedene av krigen gikk så som så. Nå har Putin skiftet strategi.",
             "Fotballstadion tok fyr i helgen"
             ]
-intf = gr.Interface(fn=classify_text, inputs="text", outputs=label, examples=examples, title=app_title)
-intf.launch(inline=False)
+intf = gr.Interface(fn=classify_text, inputs=["text", gr.DropDown(['Model 1', 'Model 2'])], outputs=label, examples=examples, title=app_title)
+intf.launch(inline=False)
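
For reference, below is a minimal sketch of how the updated app.py fits together end to end. The hunks above do not show the body of the torch.no_grad() block, the probabilities variable, or where the label output component is defined, so those parts are assumptions here (a softmax over the four target-group logits and a gr.Label output). Two further notes: the committed code calls gr.DropDown, while the component in released Gradio versions is spelled gr.Dropdown, and once the dropdown becomes a second input, each entry in examples needs a value for both fields.

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_name = "NbAiLab/nb-bert-base"
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Fine-tuned checkpoints stored alongside the app, as in the commit.
first_model = AutoModelForSequenceClassification.from_pretrained("models/first_model")
second_model = AutoModelForSequenceClassification.from_pretrained("models/second_model")

categories = ['Kvinner 30-40', 'Kvinner 40-55', 'Menn 30-40', 'Menn 40-55']

def classify_text(test_text, selected_model):
    # Pick the checkpoint chosen in the dropdown.
    if selected_model == 'Model 1':
        model = first_model
    elif selected_model == 'Model 2':
        model = second_model
    else:
        raise ValueError("Invalid model selection")

    inputs = tokenizer(test_text, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits

    # Assumed: softmax over the four target-group classes; this step is not
    # visible in the diff hunks.
    probabilities = torch.nn.functional.softmax(logits, dim=-1)[0]
    return {cat: float(p) for cat, p in zip(categories, probabilities)}

# Assumed output component; `label` is referenced but never defined in the hunks.
label = gr.Label(num_top_classes=4)

app_title = "Target group classifier"
# With a second input, each example row carries both a text lead and a model choice.
examples = [
    ["Moren leter etter sønnen i et ihjelbombet leilighetskompleks.", "Model 1"],
    ["De første månedene av krigen gikk så som så. Nå har Putin skiftet strategi.", "Model 2"],
    ["Fotballstadion tok fyr i helgen", "Model 1"],
]

intf = gr.Interface(
    fn=classify_text,
    inputs=["text", gr.Dropdown(['Model 1', 'Model 2'], label="Model")],
    outputs=label,
    examples=examples,
    title=app_title,
)
intf.launch(inline=False)

Run locally, this behaves the same way as on the Space: the dropdown decides which of the two fine-tuned NB-BERT classifiers scores the text lead, and the Label component shows the probability for each target group.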