gauneg committed on
Commit
0c167b2
·
verified ·
1 Parent(s): a81048c

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +8 -4
README.md CHANGED
@@ -25,7 +25,6 @@ This model has been trained on the following datasets:
25
  from transformers import AutoTokenizer, AutoModelForTokenClassification
26
  model_id = "gauneg/roberta-base-absa-ate-sentiment"
27
  tokenizer = AutoTokenizer.from_pretrained(model_id)
28
- model = AutoModelForTokenClassification.from_pretrained(model_id)
29
 
30
 
31
  # the sequence of labels used during training
@@ -33,6 +32,8 @@ labels = {"B-neu": 1, "I-neu": 2, "O": 0, "B-neg": 3, "B-con": 4, "I-pos": 5, "B
33
  id2lab = {idx: lab for lab, idx in labels.items()}
34
  lab2id = {lab: idx for lab, idx in labels.items()}
35
 
 
 
36
 
37
  # making one prediction at a time (should be padded/batched and truncated for efficiency)
38
  text_input = "Been here a few times and food has always been good but service really suffers when it gets crowded."
@@ -41,14 +42,17 @@ tok_inputs = tokenizer(text_input, return_tensors="pt")
41
 
42
  y_pred = model(**tok_inputs) # predicting the logits
43
 
44
- y_pred_fin = y_pred.logits.argmax(dim=-1)[0] # selecting the most favoured labels for each token from the logits
 
 
 
45
 
46
  decoded_pred = [id2lab[logx.item()] for logx in y_pred_fin]
47
 
48
 
49
  ## displaying the input tokens with predictions and skipping <s> and </s> tokens at the beginning and the end respectively
50
-
51
- tok_levl_pred = list(zip(tokenizer.convert_ids_to_tokens(tok_inputs['input_ids'][0]), decoded_pred))[1:-1]
52
 
53
  ```
54
 
 
25
  from transformers import AutoTokenizer, AutoModelForTokenClassification
26
  model_id = "gauneg/roberta-base-absa-ate-sentiment"
27
  tokenizer = AutoTokenizer.from_pretrained(model_id)
 
28
 
29
 
30
  # the sequence of labels used during training
 
32
  id2lab = {idx: lab for lab, idx in labels.items()}
33
  lab2id = {lab: idx for lab, idx in labels.items()}
34
 
35
+ model = AutoModelForTokenClassification.from_pretrained(model_id,
36
+ num_labels=len(labels), id2label=id2lab, label2id=lab2id)
37
 
38
  # making one prediction at a time (should be padded/batched and truncated for efficiency)
39
  text_input = "Been here a few times and food has always been good but service really suffers when it gets crowded."
 
42
 
43
  y_pred = model(**tok_inputs) # predicting the logits
44
 
45
+ # since first and the last tokens are excluded (<s> and </s>)
46
+ # they have to be removed before decoding the labels predicted against them
47
+ y_pred_fin = y_pred.logits.argmax(dim=-1)[0][1:-1] # selecting the most favoured labels for each token from the logits
48
+
49
 
50
  decoded_pred = [id2lab[logx.item()] for logx in y_pred_fin]
51
 
52
 
53
  ## displaying the input tokens with predictions and skipping <s> and </s> tokens at the beginning and the end respectively
54
+ decoded_toks = tok_inputs['input_ids'][0][1:-1]
55
+ tok_levl_pred = list(zip(tokenizer.convert_ids_to_tokens(decoded_toks), decoded_pred))
56
 
57
  ```
58