Spaces:
Sleeping
Sleeping
arjunascagnetto
committed on
Commit
•
79a7d58
1
Parent(s):
920c384
Update app.py
Browse files
app.py
CHANGED
@@ -2,14 +2,15 @@
|
|
2 |
|
3 |
import gradio as gr
|
4 |
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
|
|
5 |
|
|
|
|
|
|
|
6 |
|
7 |
-
|
8 |
-
# WordPiece tokenizer for the uncased BERT checkpoint; resolved generically via AutoTokenizer.
# NOTE(review): the later revision replaces this with an explicit BertTokenizer — confirm which one is live.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
|
9 |
-
|
10 |
-
|
11 |
def generate_text(input_text):
    """Generate a text continuation for *input_text*.

    Uses the module-level `tokenizer` and `model`. Returns the decoded
    generated sequence with special tokens (e.g. [CLS]/[SEP]) stripped.
    """
    # Fix: the argument was truncated to `return_tensors=` (no value, a syntax
    # error). `'pt'` asks the tokenizer for PyTorch tensors, which
    # `model.generate` expects.
    input_ids = tokenizer.encode(input_text, return_tensors='pt')
    output = model.generate(input_ids)
    return tokenizer.decode(output[0], skip_special_tokens=True)
|
15 |
|
|
|
2 |
|
3 |
import gradio as gr
|
4 |
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
5 |
+
from transformers import BertTokenizer, BertLMHeadModel
|
6 |
|
7 |
+
# Load pre-trained tokenizer and model once at import time so every
# generate_text() call reuses them.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# Fix: BertLMHeadModel used standalone for generation must be configured as a
# decoder (`is_decoder=True`); transformers warns about this and generation is
# degenerate without it. Note BERT is not trained for causal LM, so output
# quality is limited regardless — consider a GPT-style checkpoint.
model = BertLMHeadModel.from_pretrained('bert-base-uncased', is_decoder=True)
|
10 |
|
11 |
+
# Define a function to generate text using the model
|
|
|
|
|
|
|
12 |
def generate_text(input_text):
    """Return a generated continuation of *input_text*, decoded to a plain string.

    Relies on the module-level `tokenizer` and `model`; special tokens are
    removed from the decoded output.
    """
    # Encode to PyTorch tensors ('pt'), the format model.generate consumes.
    encoded_ids = tokenizer.encode(input_text, return_tensors='pt')
    generated = model.generate(encoded_ids)
    # generate() returns a batch; decode the first (only) sequence.
    return tokenizer.decode(generated[0], skip_special_tokens=True)
|
16 |
|