mou3az commited on
Commit
457c6f9
1 Parent(s): 1477c5e

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +19 -5
README.md CHANGED
@@ -22,7 +22,7 @@
22
 
23
  ### Loading the model ###
24
 
25
- ```python
26
  from peft import PeftModel, PeftConfig
27
  from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
28
  HUGGING_FACE_USER_NAME = "mou3az"
@@ -32,11 +32,11 @@
32
  model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path, return_dict=True, load_in_8bit=False, device_map='auto')
33
  QG_tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
34
  QG_model = PeftModel.from_pretrained(model, peft_model_id)
35
- ```
36
 
37
  ### At inference time ###
38
 
39
- ```python
40
  def get_question(context, answer):
41
  device = next(QG_model.parameters()).device
42
  input_text = f"Given the context '{context}' and the answer '{answer}', what question can be asked?"
@@ -46,7 +46,7 @@
46
  out = QG_tokenizer.decode(output_tokens[0], skip_special_tokens=True).replace("question:", "").strip()
47
 
48
  return out
49
- ```
50
 
51
  ### Training parameters and hyperparameters ###
52
 
@@ -85,4 +85,18 @@
85
  | 3.0 | 1.5094 | 1.202135 |
86
  | 6.0 | 1.2677 | 1.146177 |
87
  | 9.0 | 1.2613 | 1.112074 |
88
- | 12.0 | 1.1958 | 1.109059 |
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
  ### Loading the model ###
24
 
25
+ ```python
26
  from peft import PeftModel, PeftConfig
27
  from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
28
  HUGGING_FACE_USER_NAME = "mou3az"
 
32
  model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path, return_dict=True, load_in_8bit=False, device_map='auto')
33
  QG_tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
34
  QG_model = PeftModel.from_pretrained(model, peft_model_id)
35
+ ```
36
 
37
  ### At inference time ###
38
 
39
+ ```python
40
  def get_question(context, answer):
41
  device = next(QG_model.parameters()).device
42
  input_text = f"Given the context '{context}' and the answer '{answer}', what question can be asked?"
 
46
  out = QG_tokenizer.decode(output_tokens[0], skip_special_tokens=True).replace("question:", "").strip()
47
 
48
  return out
49
+ ```
50
 
51
  ### Training parameters and hyperparameters ###
52
 
 
85
  | 3.0 | 1.5094 | 1.202135 |
86
  | 6.0 | 1.2677 | 1.146177 |
87
  | 9.0 | 1.2613 | 1.112074 |
88
+ | 12.0 | 1.1958 | 1.109059 |
89
+
90
+
91
+ ---
92
+ tags:
93
+ - question-answering
94
+ - bert
95
+ - huggingface
96
+ - transformers
97
+ license: apache-2.0
98
+ model-index:
99
+ leaderboard: true
100
+ leaderboard_id: question-answering
101
+ category: Question Answering
102
+ ---