titipata commited on
Commit
ac9ed3d
1 Parent(s): 8960087

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +11 -12
README.md CHANGED
@@ -35,7 +35,7 @@ To load the model:
35
 
36
  ```py
37
  from transformers import BertForSequenceClassification, AutoTokenizer
38
- model_path = 'biodatlab/MIReAD-Neuro'
39
  model = BertForSequenceClassification.from_pretrained(model_path)
40
  tokenizer = AutoTokenizer.from_pretrained(model_path)
41
  ```
@@ -46,22 +46,21 @@ To create embeddings and for classification:
46
  # sample abstract & title text
47
  title = "Why Brain Criticality Is Clinically Relevant: A Scoping Review."
48
  abstract = "The past 25 years have seen a strong increase in the number of publications related to criticality in different areas of neuroscience..."
49
-
50
  text = title + tokenizer.sep_token + abstract
51
-
52
- tokens = tokenizer(text,
53
- max_length=512,
54
- padding=True,
55
- truncation=True,
56
- return_tensors="pt"
57
- )
58
-
59
- # for embeddings
60
  with torch.no_grad():
61
  output = model.bert(**tokens)
62
  embedding = output.last_hidden_state[:, 0, :]
63
 
64
- # for classification
65
  output = model(**tokens)
66
 logits = output.logits
67
  ```
 
35
 
36
  ```py
37
  from transformers import BertForSequenceClassification, AutoTokenizer
38
+ model_path = "biodatlab/MIReAD-Neuro"
39
  model = BertForSequenceClassification.from_pretrained(model_path)
40
  tokenizer = AutoTokenizer.from_pretrained(model_path)
41
  ```
 
46
  # sample abstract & title text
47
  title = "Why Brain Criticality Is Clinically Relevant: A Scoping Review."
48
  abstract = "The past 25 years have seen a strong increase in the number of publications related to criticality in different areas of neuroscience..."
 
49
  text = title + tokenizer.sep_token + abstract
50
+ tokens = tokenizer(
51
+ text,
52
+ max_length=512,
53
+ padding=True,
54
+ truncation=True,
55
+ return_tensors="pt"
56
+ )
57
+
58
+ # to generate an embedding from a given title and abstract
59
  with torch.no_grad():
60
  output = model.bert(**tokens)
61
  embedding = output.last_hidden_state[:, 0, :]
62
 
63
+ # to classify (200 journals) a given title and abstract
64
  output = model(**tokens)
65
 logits = output.logits
66
  ```