Upload README.md with huggingface_hub
README.md CHANGED

@@ -1,3 +1,4 @@
+
 ---
 license: bigcode-openrail-m
 datasets:
@@ -86,7 +87,9 @@ tags:
 
 ## Intended use
 
-The model follows instructions provided in the input. You should always preface your input with "Question: " and finish it with "Answer:", for example: "Question: Please write a function in Python that performs bubble sort
+The model follows instructions provided in the input. You should always preface your input with "Question: " and finish it with "Answer:", for example: "Question: Please write a function in Python that performs bubble sort.
+
+Answer:"
 
 **Feel free to share your generations in the Community tab!**
 
@@ -97,7 +100,7 @@ The model follows instructions provided in the input. You should always preface
 from peft import PeftModel
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-peft_checkpoint =
+peft_checkpoint = "bigcode/astraios-1b-lora"
 checkpoint = "bigcode/starcoderbase-1b"
 model = AutoModelForCausalLM.from_pretrained(checkpoint)
 model = PeftModel.from_pretrained(model, peft_checkpoint)
@@ -106,7 +109,9 @@ device = "cuda" # for GPU usage or "cpu" for CPU usage
 tokenizer = AutoTokenizer.from_pretrained(checkpoint)
 model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)
 
-inputs = tokenizer.encode("Question: Please write a function in Python that performs bubble sort
+inputs = tokenizer.encode("Question: Please write a function in Python that performs bubble sort.
+
+Answer:", return_tensors="pt").to(device)
 outputs = model.generate(inputs)
 print(tokenizer.decode(outputs[0]))
 ```
@@ -135,4 +140,4 @@ print(tokenizer.decode(outputs[0]))
 # Citation
 
 ```bibtex
-```
+```
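Note that even after this commit, the snippet shown in the diff re-instantiates the base model from `checkpoint` a few lines after wrapping it with `PeftModel`, which would drop the adapter weights before generation. A minimal sketch of the flow the card appears to intend, assuming the `bigcode/astraios-1b-lora` adapter id shown in the diff, might look like this:

```python
# Minimal inference sketch (an assumption about the intended flow, not the card's exact code).
# Load the base model once, attach the LoRA adapter, and generate with the wrapped model
# instead of re-loading the base checkpoint afterwards.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

peft_checkpoint = "bigcode/astraios-1b-lora"   # LoRA adapter from the diff
checkpoint = "bigcode/starcoderbase-1b"        # base model
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)
model = PeftModel.from_pretrained(model, peft_checkpoint).to(device)

# Prompt format from the "Intended use" section: "Question: ... Answer:"
prompt = "Question: Please write a function in Python that performs bubble sort.\n\nAnswer:"
inputs = tokenizer.encode(prompt, return_tensors="pt").to(device)
outputs = model.generate(inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0]))
```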
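If the adapter is only needed for inference, the LoRA weights can also be folded back into the base model so it behaves like a plain transformers checkpoint. `merge_and_unload()` is standard peft API for LoRA adapters; using it here, and the output directory name, are assumptions rather than something the card states:

```python
# Hedged sketch: merge the LoRA adapter into the base model and save the result.
# "astraios-1b-merged" is an illustrative local path, not an official artifact.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained("bigcode/starcoderbase-1b")
merged = PeftModel.from_pretrained(base, "bigcode/astraios-1b-lora").merge_and_unload()
merged.save_pretrained("astraios-1b-merged")
AutoTokenizer.from_pretrained("bigcode/starcoderbase-1b").save_pretrained("astraios-1b-merged")
```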