Update README.md
README.md CHANGED
```diff
@@ -29,7 +29,8 @@ model-index:
       value: 63.57
       name: normalized accuracy
     source:
-      url:
+      url: >-
+        https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=rxavier/Taurus-7B-1.0
       name: Open LLM Leaderboard
   - task:
       type: text-generation
@@ -45,7 +46,8 @@ model-index:
       value: 83.64
       name: normalized accuracy
     source:
-      url:
+      url: >-
+        https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=rxavier/Taurus-7B-1.0
       name: Open LLM Leaderboard
   - task:
       type: text-generation
@@ -62,7 +64,8 @@ model-index:
       value: 63.5
       name: accuracy
     source:
-      url:
+      url: >-
+        https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=rxavier/Taurus-7B-1.0
       name: Open LLM Leaderboard
   - task:
       type: text-generation
@@ -78,7 +81,8 @@ model-index:
     - type: mc2
       value: 50.21
     source:
-      url:
+      url: >-
+        https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=rxavier/Taurus-7B-1.0
       name: Open LLM Leaderboard
   - task:
       type: text-generation
@@ -95,7 +99,8 @@ model-index:
       value: 78.14
       name: accuracy
     source:
-      url:
+      url: >-
+        https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=rxavier/Taurus-7B-1.0
       name: Open LLM Leaderboard
   - task:
       type: text-generation
@@ -112,8 +117,10 @@ model-index:
       value: 59.36
       name: accuracy
     source:
-      url:
+      url: >-
+        https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=rxavier/Taurus-7B-1.0
       name: Open LLM Leaderboard
+library_name: transformers
 ---
 
 # Taurus 7B 1.0
```
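Across the metadata hunks the change is the same: each `url:` under `source:` gains a link to the model's Open LLM Leaderboard results, written with `>-`, YAML's folded block scalar with strip chomping. The indented continuation line is folded back into a single-line string and the trailing newline is dropped, so the long URL stays readable in the source without changing the parsed value. A minimal check, assuming PyYAML is installed:

```python
import yaml  # PyYAML, assumed installed (pip install pyyaml)

snippet = """
source:
  url: >-
    https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=rxavier/Taurus-7B-1.0
  name: Open LLM Leaderboard
"""

parsed = yaml.safe_load(snippet)
# Folding joins the continuation line onto the scalar and the `-` chomping
# strips the final newline, so the value is the plain one-line URL.
print(parsed["source"]["url"])
```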
````diff
@@ -155,23 +162,24 @@ User message<|im_end|>
 
 ```python
 import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer,
+from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
 
 
 model_id = "rxavier/Taurus-7B-1.0"
 model = AutoModelForCausalLM.from_pretrained(
-
-    torch_dtype=torch.bfloat16,
+    model_id,
+    torch_dtype=torch.bfloat16, #torch.float16 for older GPUs
+    device_map="auto", # Requires having accelerate installed, useful in places like Colab with limited VRAM
 )
-tokenizer = AutoTokenizer.from_pretrained(
+tokenizer = AutoTokenizer.from_pretrained(model_id)
 generation_config = GenerationConfig(
-    bos_token_id=
-    eos_token_id=
-    pad_token_id=
+    bos_token_id=tokenizer.bos_token_id,
+    eos_token_id=tokenizer.eos_token_id,
+    pad_token_id=tokenizer.pad_token_id,
 )
 
-prompt = "Give me latex formulas for extended euler equations"
 system_message = "You are an expert in economics with PhD level knowledge. You are helpful, give thorough and clear explanations, and use equations and formulas where needed."
+prompt = "Give me latex formulas for extended euler equations"
 
 messages = [{"role": "system",
              "content": system_message},
@@ -180,7 +188,7 @@ messages = [{"role": "system",
 tokens = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to("cuda")
 
 with torch.no_grad():
-    outputs = model.generate(inputs=tokens, generation_config=generation_config)
+    outputs = model.generate(inputs=tokens, generation_config=generation_config, max_length=512)
 print(tokenizer.decode(outputs["sequences"].cpu().tolist()[0]))
 ```
````
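The rewritten loading call pins `torch_dtype=torch.bfloat16` and its comment suggests `torch.float16` for older GPUs; bfloat16 on CUDA generally needs an Ampere-class card (compute capability 8.0 or newer). A small guard that picks the dtype at runtime, offered as a sketch rather than part of the README's own code:

```python
import torch

# Use bfloat16 where the GPU supports it, otherwise fall back to float16
# as the diff's comment suggests for older cards.
dtype = (
    torch.bfloat16
    if torch.cuda.is_available() and torch.cuda.is_bf16_supported()
    else torch.float16
)
print(dtype)
```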
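The hunk context (`User message<|im_end|>`) indicates the README documents a ChatML prompt format, which the fixed snippet builds via `tokenizer.apply_chat_template`. The template can be rendered as text before generating to confirm it matches that format; a sketch, assuming the model's tokenizer ships a ChatML chat template and reusing `tokenizer`, `system_message`, and `prompt` from the snippet above:

```python
# With tokenize=False, apply_chat_template returns the formatted prompt string
# instead of token ids, so the <|im_start|>/<|im_end|> markers can be checked by eye.
messages = [
    {"role": "system", "content": system_message},
    {"role": "user", "content": prompt},
]
print(tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
```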
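One caveat survives the fix: by default `model.generate` returns a plain tensor of token ids, so the dict-style `outputs["sequences"]` in the last line only works when `return_dict_in_generate=True` is set on the generation config. A sketch of the final step with that flag made explicit, under the same assumptions as the snippet above:

```python
# Reuses model, tokenizer, tokens, and generation_config from the README snippet.
# Without this flag, generate() returns a LongTensor, which cannot be indexed
# with the string "sequences".
generation_config.return_dict_in_generate = True

with torch.no_grad():
    outputs = model.generate(inputs=tokens, generation_config=generation_config, max_length=512)

# outputs is now a ModelOutput; its .sequences field holds the generated ids.
print(tokenizer.decode(outputs.sequences[0].cpu().tolist()))
```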