feat update readme
README.md CHANGED
@@ -55,7 +55,7 @@ src_text = prefix + "Съешь ещё этих мягких французск
 # translate Russian to Chinese
 input_ids = tokenizer(src_text, return_tensors="pt")
 
-generated_tokens = model.generate(**input_ids)
+generated_tokens = model.generate(**input_ids.to(device))
 
 result = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
 print(result)
@@ -82,7 +82,7 @@ src_text = prefix + "再吃这些法国的甜蜜的面包。"
 # translate Russian to Chinese
 input_ids = tokenizer(src_text, return_tensors="pt")
 
-generated_tokens = model.generate(**input_ids)
+generated_tokens = model.generate(**input_ids.to(device))
 
 result = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
 print(result)
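For context, the change matters when the model itself has been moved to a GPU: `model.generate` then needs the tokenized inputs on the same device, which is what `.to(device)` provides. Below is a minimal sketch of the surrounding README usage under assumptions: the model name, the `device` choice, the task `prefix`, and the completed source sentence are illustrative placeholders, not taken from this commit.

```python
from transformers import T5ForConditionalGeneration, T5Tokenizer

device = "cuda"  # or "cpu"; assumed, not shown in this excerpt
model_name = "your-org/your-t5-translation-model"  # hypothetical placeholder

# Load the model and move it to the target device.
model = T5ForConditionalGeneration.from_pretrained(model_name).to(device)
tokenizer = T5Tokenizer.from_pretrained(model_name)

prefix = "translate to zh: "  # assumed task prefix
# Source sentence completed from the truncated hunk header (assumed wording).
src_text = prefix + "Съешь ещё этих мягких французских булок."

# translate Russian to Chinese
input_ids = tokenizer(src_text, return_tensors="pt")

# The fix in this commit: move the tokenized inputs to the model's device
# before calling generate(), avoiding a CPU/GPU device-mismatch error.
generated_tokens = model.generate(**input_ids.to(device))

result = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
print(result)
```

The same pattern applies to the second snippet with the Chinese source text; only the `src_text` value differs.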