Fix code snippets from model card
Pull request #1, opened by osanseviero
README.md
CHANGED
@@ -159,7 +159,7 @@ tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large")
|
|
159 |
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large")
|
160 |
|
161 |
input_text = "translate English to German: How old are you?"
|
162 |
-
input_ids = tokenizer
|
163 |
|
164 |
outputs = model.generate(input_ids)
|
165 |
print(tokenizer.decode(outputs[0]))
|
@@ -180,7 +180,7 @@ tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large")
|
|
180 |
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large", device_map="auto")
|
181 |
|
182 |
input_text = "translate English to German: How old are you?"
|
183 |
-
input_ids = tokenizer
|
184 |
|
185 |
outputs = model.generate(input_ids)
|
186 |
print(tokenizer.decode(outputs[0]))
|
@@ -204,7 +204,7 @@ tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large")
|
|
204 |
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large", device_map="auto", torch_dtype=torch.float16)
|
205 |
|
206 |
input_text = "translate English to German: How old are you?"
|
207 |
-
input_ids = tokenizer
|
208 |
|
209 |
outputs = model.generate(input_ids)
|
210 |
print(tokenizer.decode(outputs[0]))
|
@@ -225,7 +225,7 @@ tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large")
|
|
225 |
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large", device_map="auto", load_in_8bit=True)
|
226 |
|
227 |
input_text = "translate English to German: How old are you?"
|
228 |
-
input_ids = tokenizer
|
229 |
|
230 |
outputs = model.generate(input_ids)
|
231 |
print(tokenizer.decode(outputs[0]))
|
|
|
159 |
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large")
|
160 |
|
161 |
input_text = "translate English to German: How old are you?"
|
162 |
+
input_ids = tokenizer(input_text, return_tensors="pt").input_ids
|
163 |
|
164 |
outputs = model.generate(input_ids)
|
165 |
print(tokenizer.decode(outputs[0]))
|
|
|
180 |
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large", device_map="auto")
|
181 |
|
182 |
input_text = "translate English to German: How old are you?"
|
183 |
+
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")
|
184 |
|
185 |
outputs = model.generate(input_ids)
|
186 |
print(tokenizer.decode(outputs[0]))
|
|
|
204 |
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large", device_map="auto", torch_dtype=torch.float16)
|
205 |
|
206 |
input_text = "translate English to German: How old are you?"
|
207 |
+
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")
|
208 |
|
209 |
outputs = model.generate(input_ids)
|
210 |
print(tokenizer.decode(outputs[0]))
|
|
|
225 |
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large", device_map="auto", load_in_8bit=True)
|
226 |
|
227 |
input_text = "translate English to German: How old are you?"
|
228 |
+
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")
|
229 |
|
230 |
outputs = model.generate(input_ids)
|
231 |
print(tokenizer.decode(outputs[0]))
|