NickyNicky committed • daa8266
Parent(s): e7656e9
Update README.md
README.md CHANGED

@@ -198,7 +198,25 @@ en español responde solo en json:
 <start_of_turn>model
 """
 
+inputs = tokenizer.encode(txt,
+                          return_tensors="pt",
+                          add_special_tokens=False).to("cuda:0")
+max_new_tokens=1500
+generation_config = GenerationConfig(
+              max_new_tokens=max_new_tokens,
+              temperature=0.15,
+              # top_p=0.55,
+              top_k=50,
+              repetition_penalty=1.1,
+              do_sample=True,
+)
+outputs = base_model.generate(generation_config=generation_config,
+                              input_ids=inputs,
+                              stopping_criteria=stopping_criteria_list,)
+print(tokenizer.decode(outputs[0], skip_special_tokens=False) )
+```
 
+```
 '''
 ### OUTPUT EXAMPLE
 <start_of_turn>model
@@ -228,23 +246,4 @@ en español responde solo en json:
 ]
 }<end_of_turn>
 '''
-
-
-
-inputs = tokenizer.encode(txt,
-                          return_tensors="pt",
-                          add_special_tokens=False).to("cuda:0")
-max_new_tokens=1500
-generation_config = GenerationConfig(
-              max_new_tokens=max_new_tokens,
-              temperature=0.15,
-              # top_p=0.55,
-              top_k=50,
-              repetition_penalty=1.1,
-              do_sample=True,
-)
-outputs = base_model.generate(generation_config=generation_config,
-                              input_ids=inputs,
-                              stopping_criteria=stopping_criteria_list,)
-print(tokenizer.decode(outputs[0], skip_special_tokens=False) )
-```
+```
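For context, the snippet this commit relocates references several names (`txt`, `tokenizer`, `base_model`, `stopping_criteria_list`) that the README defines earlier and that are not visible in this diff. Below is a minimal, self-contained sketch of how that surrounding setup might look; the model id, the example prompt, and the `StopOnEndOfTurn` stopping criterion are illustrative assumptions, not taken from this commit.

```python
import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    GenerationConfig,
    StoppingCriteria,
    StoppingCriteriaList,
)

# Placeholder checkpoint: substitute the repository's own model id.
model_id = "google/gemma-2b-it"

tokenizer = AutoTokenizer.from_pretrained(model_id)
base_model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16
).to("cuda:0")

class StopOnEndOfTurn(StoppingCriteria):
    """Stop generating once the last emitted token is <end_of_turn>."""
    def __init__(self, tokenizer):
        self.end_id = tokenizer.convert_tokens_to_ids("<end_of_turn>")

    def __call__(self, input_ids, scores, **kwargs):
        return input_ids[0, -1].item() == self.end_id

stopping_criteria_list = StoppingCriteriaList([StopOnEndOfTurn(tokenizer)])

# Illustrative prompt in the Gemma chat format; the README builds a longer
# instruction that asks for a JSON-only answer in Spanish.
txt = """<bos><start_of_turn>user
en español responde solo en json: nombra tres colores primarios.<end_of_turn>
<start_of_turn>model
"""

inputs = tokenizer.encode(txt,
                          return_tensors="pt",
                          add_special_tokens=False).to("cuda:0")
generation_config = GenerationConfig(
    max_new_tokens=1500,
    temperature=0.15,
    top_k=50,
    repetition_penalty=1.1,
    do_sample=True,
)
outputs = base_model.generate(generation_config=generation_config,
                              input_ids=inputs,
                              stopping_criteria=stopping_criteria_list)
print(tokenizer.decode(outputs[0], skip_special_tokens=False))
```

With that setup in place, the block this commit moves inside the fenced example generates until the model emits `<end_of_turn>` and prints the raw decoded output, special tokens included.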