Teja-Gollapudi
committed on
Commit
•
7ddf9aa
1
Parent(s):
8403990
Update README.md
Browse files
README.md
CHANGED
@@ -8,20 +8,19 @@ library_name: transformers
|
|
8 |
pipeline_tag: text-generation
|
9 |
---
|
10 |
|
11 |
-
# open-llama-0.3T-7B-
|
12 |
-
|
13 |
|
14 |
```
|
15 |
import os
|
16 |
import torch
|
17 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
18 |
|
19 |
-
model_name = 'VMware/open-llama-0.3T-7B-
|
20 |
|
21 |
|
22 |
tokenizer = AutoTokenizer.from_pretrained(model_name, add_bos_token = True)
|
23 |
|
24 |
-
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype= torch.float16, device_map = '
|
25 |
|
26 |
prompt_template = "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Response:"
|
27 |
|
|
|
8 |
pipeline_tag: text-generation
|
9 |
---
|
10 |
|
11 |
+
# VMware/open-llama-0.3T-7B-instruct-dolly-hhrlhf
|
|
|
12 |
|
13 |
```
|
14 |
import os
|
15 |
import torch
|
16 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
17 |
|
18 |
+
model_name = 'VMware/open-llama-0.3T-7B-instruct-dolly-hhrlhf'
|
19 |
|
20 |
|
21 |
tokenizer = AutoTokenizer.from_pretrained(model_name, add_bos_token = True)
|
22 |
|
23 |
+
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype= torch.float16, device_map = 'sequential')
|
24 |
|
25 |
prompt_template = "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Response:"
|
26 |
|