Safetensors
mjbuehler committed on
Commit
ded826f
1 Parent(s): 86236c3

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +10 -9
README.md CHANGED
@@ -119,21 +119,22 @@ model, tokenizer = load_model(
119
  ```
120
  Inference:
121
  ```python
122
- def generate_response (model,tokenizer,text_input="What is the best biomaterial for superior strength?",
123
- num_return_sequences=1,
124
- temperature=1., #the higher the temperature, the more creative the model becomes
125
- max_new_tokens=127,
126
- num_beams=1,
 
127
  top_k = 50,
128
- top_p =0.9,repetition_penalty=1.,eos_token_id=2,verbatim=False,
129
- exponential_decay_length_penalty_fac=None,add_special_tokens=True,
130
  ):
131
  inputs = tokenizer(text_input,
132
  with torch.no_grad():
133
  outputs = model.generate(input_ids = inputs["input_ids"],
134
- attention_mask = inputs["attention_mask"] , # This is usually done automatically by the tokenizer
135
  max_new_tokens=max_new_tokens,
136
- temperature=temperature, #value used to modulate the next token probabilities.
137
  num_beams=num_beams,
138
  top_k = top_k,
139
  top_p = top_p,
 
119
  ```
120
  Inference:
121
  ```python
122
+ def generate_response (model, tokenizer,
123
+ text_input="What is the best biomaterial for superior strength?",
124
+ num_return_sequences = 1,
125
+ temperature = 0.75,
126
+ max_new_tokens = 127,
127
+ num_beams = 1,
128
  top_k = 50,
129
+ top_p = 0.9, repetition_penalty=1., eos_token_id=2, verbatim=False,
130
+ add_special_tokens=True,
131
  ):
132
  inputs = tokenizer(text_input,
133
  with torch.no_grad():
134
  outputs = model.generate(input_ids = inputs["input_ids"],
135
+ attention_mask = inputs["attention_mask"] ,
136
  max_new_tokens=max_new_tokens,
137
+ temperature=temperature,
138
  num_beams=num_beams,
139
  top_k = top_k,
140
  top_p = top_p,