|
|
|
|
|
# Model Card for smd29/llama-2-7b-chat-finetuned
|
|
|
This model is LLaMA-2-7b-chat fine-tuned using LoRA.
|
|
|
|
|
|
## Training Details |
|
|
|
### Training Data |
|
|
|
Trained on [mlabonne/guanaco-llama2-1k](https://huggingface.co/datasets/mlabonne/guanaco-llama2-1k), a 1,000-sample subset of the Guanaco dataset formatted for the LLaMA-2 chat prompt template. A sketch of a typical LoRA recipe for this dataset follows.
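The training script and hyperparameters are not documented in this card. Below is a minimal sketch of a common LoRA fine-tuning recipe for this dataset using `peft` and `trl` (assuming the pre-0.9 `SFTTrainer` API that accepts `dataset_text_field`); every hyperparameter value shown is an illustrative assumption, not the recorded configuration.

```python
# Illustrative LoRA fine-tuning sketch; hyperparameters are assumptions,
# not the settings actually used to train this checkpoint.
import torch
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import SFTTrainer

base_model = "meta-llama/Llama-2-7b-chat-hf"
dataset = load_dataset("mlabonne/guanaco-llama2-1k", split="train")

tokenizer = AutoTokenizer.from_pretrained(base_model)
tokenizer.pad_token = tokenizer.eos_token

model = AutoModelForCausalLM.from_pretrained(
    base_model,
    torch_dtype=torch.float16,
    device_map="auto",
)

# LoRA adapter configuration (illustrative values).
peft_config = LoraConfig(
    r=64,
    lora_alpha=16,
    lora_dropout=0.1,
    bias="none",
    task_type="CAUSAL_LM",
)

trainer = SFTTrainer(
    model=model,
    train_dataset=dataset,
    peft_config=peft_config,
    dataset_text_field="text",  # guanaco-llama2-1k stores prompts in a "text" column
    tokenizer=tokenizer,
    args=TrainingArguments(
        output_dir="./results",
        num_train_epochs=1,
        per_device_train_batch_size=4,
        learning_rate=2e-4,
    ),
)
trainer.train()
```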
|
|
|
## Code

Install the required libraries:

```bash
pip install transformers accelerate
```

Run inference with the `transformers` text-generation pipeline. LLaMA-2-chat models expect prompts wrapped in the `<s>[INST] ... [/INST]` template:

```python
from transformers import AutoTokenizer
import transformers
import torch

model = "smd29/llama-2-7b-chat-finetuned"
prompt = "What is a large language model?"

tokenizer = AutoTokenizer.from_pretrained(model)

# Build the pipeline; float16 halves memory use and
# device_map="auto" places the weights on the available GPU(s).
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

# Wrap the prompt in the LLaMA-2 chat template before generating.
sequences = pipeline(
    f'<s>[INST] {prompt} [/INST]',
    do_sample=True,
    top_k=10,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
    max_length=200,
)
for seq in sequences:
    print(f"Result: {seq['generated_text']}")
```

A longer prompt works the same way; raise `max_length` to leave room for the answer:

```python
prompt = (
    "Explain in a simple way what the equation for finding the nth "
    "triangle number is and how it can be proved using only high "
    "school level math. Please give each step of the proof using LaTeX."
)
sequences = pipeline(
    f'<s>[INST] {prompt} [/INST]',
    do_sample=True,
    top_k=10,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
    max_length=400,
)
for seq in sequences:
    print(f"Result: {seq['generated_text']}")
```
|
|
|
|
|
|
|
|