Update README.md
README.md CHANGED
@@ -4,7 +4,7 @@ tags: []
 widget:
 - messages:
   - role: user
-    content:
+    content: Explain what monthly recurring revenue is.
 inference:
   parameters:
     max_new_tokens: 200
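
Note on the widget change: it gives the Hub inference widget a concrete user turn and caps generation at 200 new tokens. A minimal sketch of reproducing that request locally, assuming the tokenizer ships the standard Gemma chat template (the repo id matches the one used in the code hunks below):

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("core-outline/gemma-2b-instruct")
model = AutoModelForCausalLM.from_pretrained("core-outline/gemma-2b-instruct")

# Same message and generation parameter as the widget YAML above
messages = [{"role": "user", "content": "Explain what monthly recurring revenue is."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
)

outputs = model.generate(input_ids, max_new_tokens=200)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```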
@@ -58,10 +58,10 @@ Below we share some code snippets on how to get quickly started with running the
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM

-tokenizer = AutoTokenizer.from_pretrained("
-model = AutoModelForCausalLM.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("core-outline/gemma-2b-instruct")
+model = AutoModelForCausalLM.from_pretrained("core-outline/gemma-2b-instruct")

-input_text = "
+input_text = "Explain what churn rate is."
 input_ids = tokenizer(input_text, return_tensors="pt")

 outputs = model.generate(**input_ids)
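
Note on the snippet above: without an explicit length argument, `generate` falls back to the checkpoint's default generation length, which is often short enough to truncate an answer. Passing `max_new_tokens`, as the widget config already does, is a safe addition (a suggestion, not part of this commit):

```python
# Cap new tokens explicitly so the answer isn't cut off by the default length
outputs = model.generate(**input_ids, max_new_tokens=200)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```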
@@ -76,10 +76,10 @@ print(tokenizer.decode(outputs[0]))
 # pip install accelerate
 from transformers import AutoTokenizer, AutoModelForCausalLM

-tokenizer = AutoTokenizer.from_pretrained("
-model = AutoModelForCausalLM.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("core-outline/gemma-2b-instruct")
+model = AutoModelForCausalLM.from_pretrained("core-outline/gemma-2b-instruct", device_map="auto")

-input_text = "
+input_text = "How is click through rate calculated."
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

 outputs = model.generate(**input_ids)
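
Note: with `device_map="auto"`, accelerate decides where the weights land, so the hard-coded `.to("cuda")` on the inputs only lines up when that choice is a single CUDA device. A placement-agnostic variant (a sketch, not part of the commit):

```python
# model.device reports the device accelerate assigned the (first) weights to
input_ids = tokenizer(input_text, return_tensors="pt").to(model.device)
outputs = model.generate(**input_ids)
```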
@@ -95,10 +95,10 @@ print(tokenizer.decode(outputs[0]))
 # pip install accelerate
 from transformers import AutoTokenizer, AutoModelForCausalLM

-tokenizer = AutoTokenizer.from_pretrained("
-model = AutoModelForCausalLM.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("core-outline/gemma-2b-instruct")
+model = AutoModelForCausalLM.from_pretrained("core-outline/gemma-2b-instruct", device_map="auto", torch_dtype=torch.float16)

-input_text = "
+input_text = "What is an RFM analysis?"
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

 outputs = model.generate(**input_ids)
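
Note: the new `torch_dtype=torch.float16` argument references the `torch` namespace, but no `import torch` appears in this hunk; unless the full README imports it earlier, the snippet raises a `NameError`. The fix is a one-line import at the top:

```python
import torch  # required for the torch.float16 reference below
from transformers import AutoTokenizer, AutoModelForCausalLM
```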
@@ -111,8 +111,8 @@ print(tokenizer.decode(outputs[0]))
 # pip install accelerate
 from transformers import AutoTokenizer, AutoModelForCausalLM

-tokenizer = AutoTokenizer.from_pretrained("
-model = AutoModelForCausalLM.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("core-outline/gemma-2b-instruct")
+model = AutoModelForCausalLM.from_pretrained("core-outline/gemma-2b-instruct", device_map="auto", torch_dtype=torch.bfloat16)

 input_text = "Write me a poem about Machine Learning."
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
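
The same `import torch` caveat applies to the `torch.bfloat16` hunk above. As optional hardening (an assumption, not in the commit), GPUs without native bfloat16 support can fall back to float16:

```python
import torch
from transformers import AutoModelForCausalLM

# Pre-Ampere CUDA GPUs lack native bfloat16 support
dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16
model = AutoModelForCausalLM.from_pretrained(
    "core-outline/gemma-2b-instruct", device_map="auto", torch_dtype=dtype
)
```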
@@ -131,8 +131,8 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

 quantization_config = BitsAndBytesConfig(load_in_8bit=True)

-tokenizer = AutoTokenizer.from_pretrained("
-model = AutoModelForCausalLM.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("core-outline/gemma-2b-instruct")
+model = AutoModelForCausalLM.from_pretrained("core-outline/gemma-2b-instruct", quantization_config=quantization_config)

 input_text = "Write me a poem about Machine Learning."
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
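
Note: `BitsAndBytesConfig(load_in_8bit=True)` routes the weights through bitsandbytes, so that package is needed on top of accelerate even though the hunk's comment only mentions accelerate. A quick way to verify the quantized load (standard transformers API):

```python
# pip install accelerate bitsandbytes
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_8bit=True)
model = AutoModelForCausalLM.from_pretrained(
    "core-outline/gemma-2b-instruct", quantization_config=quantization_config
)
print(model.get_memory_footprint())  # quantized weight size in bytes
```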
@@ -149,8 +149,8 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

 quantization_config = BitsAndBytesConfig(load_in_4bit=True)

-tokenizer = AutoTokenizer.from_pretrained("
-model = AutoModelForCausalLM.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("core-outline/gemma-2b-instruct")
+model = AutoModelForCausalLM.from_pretrained("core-outline/gemma-2b-instruct", quantization_config=quantization_config)

 input_text = "Write me a poem about Machine Learning."
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
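
For context on why the 8-bit and 4-bit variants matter, rough weight-memory arithmetic (ballpark figures, assuming roughly 2.5B parameters as for the base Gemma 2B, and ignoring activations and runtime overhead):

```python
params = 2.5e9  # assumed parameter count; adjust for the actual checkpoint

for name, bytes_per_param in [("fp16/bf16", 2.0), ("int8", 1.0), ("4-bit", 0.5)]:
    print(f"{name}: ~{params * bytes_per_param / 1e9:.1f} GB")
# fp16/bf16: ~5.0 GB; int8: ~2.5 GB; 4-bit: ~1.2 GB
```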