davda54 committed
Commit 62557df
Parent: 90f7ec2

Update README.md

Files changed (1):
  1. README.md (+1 -24)
README.md CHANGED
@@ -329,10 +329,9 @@ pip install accelerate
 
 
 ```python
-from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM
 
-# First, we will have to import the tokenizer and the language model
 tokenizer = AutoTokenizer.from_pretrained("norallm/normistral-7b-warm")
 
 # This setup needs about 8gb VRAM
@@ -344,26 +343,4 @@ model = AutoModelForCausalLM.from_pretrained(
     load_in_8bit=True,
     torch_dtype=torch.bfloat16
 )
-
-
-
-# Now we will define the zero-shot prompt template
-prompt = """Engelsk: {0}
-Bokmål:"""
-
-# A function that will take care of generating the output
-@torch.no_grad()
-def generate(text):
-    text = prompt.format(text)
-    input_ids = tokenizer(text, return_tensors='pt').input_ids.cuda()
-    prediction = model.generate(
-        input_ids,
-        max_new_tokens=64,
-        do_sample=False,
-        eos_token_id=tokenizer('\n').input_ids
-    )
-    return tokenizer.decode(prediction[0, input_ids.size(1):]).strip()
-
-# Now you can simply call the generate function with an English text you want to translate:
-generate("I'm super excited about this Norwegian NORA model! Can it translate these sentences?")
 ```
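After this commit, the README example is reduced to the quantized loading setup. Reassembled from the hunks above into a self-contained sketch; the repository id passed to `from_pretrained` and any keyword arguments on the README lines elided between the two hunks are not visible in this diff, so the model id is assumed to match the tokenizer's and the unseen arguments are omitted:

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("norallm/normistral-7b-warm")

# This setup needs about 8gb VRAM.
# 8-bit loading requires the accelerate and bitsandbytes packages.
model = AutoModelForCausalLM.from_pretrained(
    "norallm/normistral-7b-warm",  # assumption: same repo id as the tokenizer
    load_in_8bit=True,
    torch_dtype=torch.bfloat16
)
```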
 
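For reference, the zero-shot translation helper deleted by this commit, restored to a runnable standalone form: indentation lost in the flattened diff view is reinstated, and `tokenizer` and `model` are assumed to be loaded as in the sketch above. The prompt is Norwegian ("Engelsk" = English; "Bokmål" is the target written standard):

```python
# Zero-shot prompt template: the source sentence is slotted in after "Engelsk:"
# and the model continues after "Bokmål:" with the translation.
prompt = """Engelsk: {0}
Bokmål:"""

@torch.no_grad()
def generate(text):
    text = prompt.format(text)
    input_ids = tokenizer(text, return_tensors='pt').input_ids.cuda()
    prediction = model.generate(
        input_ids,
        max_new_tokens=64,
        do_sample=False,  # greedy decoding
        eos_token_id=tokenizer('\n').input_ids  # stop at the first newline
    )
    # Decode only the newly generated tokens, not the prompt
    return tokenizer.decode(prediction[0, input_ids.size(1):]).strip()

generate("I'm super excited about this Norwegian NORA model! Can it translate these sentences?")
```

Stopping on the newline token keeps greedy decoding from running past the single translated line.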