chiliu committed on
Commit c84ad1e
1 Parent(s): cab348c

add readme
.gitattributes DELETED
@@ -1,35 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,128 @@
  ---
+ language:
+ - en
+ library_name: transformers
+ tags:
+ - gpt
+ - llm
+ - large language model
+ inference: false
+ thumbnail: >-
+   https://h2o.ai/etc.clientlibs/h2o/clientlibs/clientlib-site/resources/images/favicon.ico
  license: apache-2.0
  ---
+ # Model Card
+
+ **The best 3B model, surpassing dolly-v2-12b**
+
+ The best 3B model on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard), with performance surpassing dolly-v2-12b.
+
+ | Metric              | Value |
+ |---------------------|-------|
+ | MMLU (5-shot)       | 27.3  |
+ | ARC (25-shot)       | 41.7  |
+ | HellaSwag (10-shot) | 71.1  |
+ | TruthfulQA (0-shot) | 37.9  |
+ | Avg.                | 44.5  |
+
+ We use the state-of-the-art [Language Model Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) to run the benchmarks above.
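+
+ The snippet below is a minimal sketch of how comparable scores can be produced with the harness's Python API (as in harness v0.3.x); the exact task names, few-shot settings, and API surface vary across harness versions, and this is not necessarily the leaderboard's own configuration.
+
+ ```python
+ # Hedged reproduction sketch -- assumes lm-evaluation-harness v0.3.x,
+ # not necessarily the leaderboard's exact pipeline.
+ from lm_eval import evaluator
+
+ results = evaluator.simple_evaluate(
+     model="hf-causal",
+     model_args="pretrained=CobraMamba/mamba-gpt-3b-v3",
+     tasks=["arc_challenge"],  # MMLU, HellaSwag, TruthfulQA run analogously
+     num_fewshot=25,           # the 25-shot ARC setting from the table above
+ )
+ print(results["results"])
+ ```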
+
+ The training code and data will be open-sourced later on GitHub (https://github.com/chi2liu/mamba-gpt-3b).
+
+
+ ## Training Dataset
+
+ `mamba-gpt-3b-v3` is trained on multiple datasets (a loading sketch follows the list):
+ - [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
+ - [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
+ - [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
+ - [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
+
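+ The sketch below shows one plausible way to pull these corpora with the `datasets` library; the `tatsu-lab/alpaca` ID is an assumed Hugging Face mirror of the Stanford Alpaca GitHub release, and the authors' actual mixing and filtering recipe has not been published yet.
+
+ ```python
+ # Hypothetical data-loading sketch -- not the authors' released pipeline.
+ from datasets import load_dataset
+
+ sources = [
+     "tatsu-lab/alpaca",          # assumed HF mirror of Stanford Alpaca
+     "OpenAssistant/oasst1",
+     "GAIR/lima",                 # gated; requires accepting the dataset license
+     "sahil2801/CodeAlpaca-20k",
+ ]
+ parts = {name: load_dataset(name, split="train") for name in sources}
+ # The column schemas differ across sources, so real training code would first
+ # normalize every example to a shared (instruction, response) format.
+ for name, ds in parts.items():
+     print(name, ds.column_names)
+ ```
+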
+ ## Summary
+
+ We fine-tuned the OpenLLaMA base model and surpassed it on multiple evaluation subtasks, making this currently the best-performing 3B model, with performance comparable to llama-7b (a hedged fine-tuning sketch follows this list).
+ - Base model: [openlm-research/open_llama_3b_v2](https://huggingface.co/openlm-research/open_llama_3b_v2)
+
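+ Since the training code is not yet released, the following is only a minimal instruction-tuning sketch from the stated base model using the `transformers` `Trainer`; the dataset choice, prompt template, and hyperparameters are illustrative assumptions, not the authors' recipe.
+
+ ```python
+ # Hypothetical fine-tuning sketch -- not the authors' released training code.
+ from datasets import load_dataset
+ from transformers import (AutoModelForCausalLM, AutoTokenizer,
+                           DataCollatorForLanguageModeling, Trainer,
+                           TrainingArguments)
+
+ base = "openlm-research/open_llama_3b_v2"
+ tokenizer = AutoTokenizer.from_pretrained(base)
+ tokenizer.pad_token = tokenizer.unk_token  # this repo pads with <unk>
+ model = AutoModelForCausalLM.from_pretrained(base)
+
+ def format_example(example):
+     # Assumed Alpaca-style template; ignores the optional "input" field.
+     text = f"### Instruction:\n{example['instruction']}\n### Response:\n{example['output']}"
+     return tokenizer(text, truncation=True, max_length=2048)
+
+ train_data = load_dataset("tatsu-lab/alpaca", split="train").map(
+     format_example, remove_columns=["instruction", "input", "output", "text"])
+
+ Trainer(
+     model=model,
+     args=TrainingArguments(output_dir="mamba-gpt-3b-v3", num_train_epochs=3,
+                            per_device_train_batch_size=4, learning_rate=2e-5,
+                            bf16=True),
+     train_dataset=train_data,
+     data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
+ ).train()
+ ```
+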
+ ## Usage
+
+ To use the model with the `transformers` library on a machine with GPUs, first make sure you have the `transformers`, `accelerate`, and `torch` libraries installed.
+
+ ```bash
+ pip install transformers==4.29.2
+ pip install accelerate==0.19.0
+ pip install torch==2.0.0
+ ```
+
+ ```python
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ tokenizer = AutoTokenizer.from_pretrained("CobraMamba/mamba-gpt-3b-v3")
+ model = AutoModelForCausalLM.from_pretrained("CobraMamba/mamba-gpt-3b-v3", trust_remote_code=True, torch_dtype=torch.float16)
+
+ input_context = "Your text here"
+ input_ids = tokenizer.encode(input_context, return_tensors="pt")
+ # do_sample=True is required for temperature to take effect
+ output = model.generate(input_ids, max_length=128, do_sample=True, temperature=0.7)
+ output_text = tokenizer.decode(output[0], skip_special_tokens=True)
+ print(output_text)
+ ```
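+
+ On a GPU machine, a minimal variant (an assumption using standard `transformers`/`accelerate` options, not anything specific to this model) is to let `device_map="auto"` place the fp16 weights on available GPUs and move the inputs to the model's device:
+
+ ```python
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ tokenizer = AutoTokenizer.from_pretrained("CobraMamba/mamba-gpt-3b-v3")
+ # device_map="auto" (via accelerate) shards the model across available GPUs.
+ model = AutoModelForCausalLM.from_pretrained(
+     "CobraMamba/mamba-gpt-3b-v3", torch_dtype=torch.float16, device_map="auto")
+
+ input_ids = tokenizer.encode("Your text here", return_tensors="pt").to(model.device)
+ output = model.generate(input_ids, max_new_tokens=128, do_sample=True, temperature=0.7)
+ print(tokenizer.decode(output[0], skip_special_tokens=True))
+ ```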
+
+ ## Model Architecture
+
+ ```
+ LlamaForCausalLM(
+   (model): LlamaModel(
+     (embed_tokens): Embedding(32000, 3200, padding_idx=0)
+     (layers): ModuleList(
+       (0-25): 26 x LlamaDecoderLayer(
+         (self_attn): LlamaAttention(
+           (q_proj): Linear(in_features=3200, out_features=3200, bias=False)
+           (k_proj): Linear(in_features=3200, out_features=3200, bias=False)
+           (v_proj): Linear(in_features=3200, out_features=3200, bias=False)
+           (o_proj): Linear(in_features=3200, out_features=3200, bias=False)
+           (rotary_emb): LlamaRotaryEmbedding()
+         )
+         (mlp): LlamaMLP(
+           (gate_proj): Linear(in_features=3200, out_features=8640, bias=False)
+           (down_proj): Linear(in_features=8640, out_features=3200, bias=False)
+           (up_proj): Linear(in_features=3200, out_features=8640, bias=False)
+           (act_fn): SiLUActivation()
+         )
+         (input_layernorm): LlamaRMSNorm()
+         (post_attention_layernorm): LlamaRMSNorm()
+       )
+     )
+     (norm): LlamaRMSNorm()
+   )
+   (lm_head): Linear(in_features=3200, out_features=32000, bias=False)
+ )
+ ```
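+
+ These dimensions follow this repository's `config.json` (`hidden_size` 3200, 26 hidden layers, `intermediate_size` 8640, vocabulary 32000). A quick way to verify the shape locally is to print the config and the instantiated module tree:
+
+ ```python
+ from transformers import AutoConfig, AutoModelForCausalLM
+
+ config = AutoConfig.from_pretrained("CobraMamba/mamba-gpt-3b-v3")
+ print(config.hidden_size, config.num_hidden_layers, config.intermediate_size)
+
+ model = AutoModelForCausalLM.from_pretrained("CobraMamba/mamba-gpt-3b-v3")
+ print(model)  # prints the module tree shown above
+ ```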
+
+ ## Citation
+
+ If this work is helpful, please kindly cite it as:
+
+ ```bibtex
+ @Misc{mamba-gpt-3b-v3,
+     title = {Mamba-GPT-3b-v3},
+     author = {chiliu},
+     howpublished = {\url{https://huggingface.co/CobraMamba/mamba-gpt-3b-v3}},
+     year = {2023}
+ }
+ ```
+
+
+ ## Disclaimer
+
+ Please read this disclaimer carefully before using the large language model provided in this repository. Your use of the model signifies your agreement to the following terms and conditions.
+
+ - Biases and Offensiveness: The large language model is trained on a diverse range of internet text data, which may contain biased, racist, offensive, or otherwise inappropriate content. By using this model, you acknowledge and accept that the generated content may sometimes exhibit biases or produce content that is offensive or inappropriate. The developers of this repository do not endorse, support, or promote any such content or viewpoints.
+ - Limitations: The large language model is an AI-based tool and not a human. It may produce incorrect, nonsensical, or irrelevant responses. It is the user's responsibility to critically evaluate the generated content and use it at their discretion.
+ - Use at Your Own Risk: Users of this large language model must assume full responsibility for any consequences that may arise from their use of the tool. The developers and contributors of this repository shall not be held liable for any damages, losses, or harm resulting from the use or misuse of the provided model.
+ - Ethical Considerations: Users are encouraged to use the large language model responsibly and ethically. By using this model, you agree not to use it for purposes that promote hate speech, discrimination, harassment, or any form of illegal or harmful activities.
+ - Reporting Issues: If you encounter any biased, offensive, or otherwise inappropriate content generated by the large language model, please report it to the repository maintainers through the provided channels. Your feedback will help improve the model and mitigate potential issues.
+ - Changes to this Disclaimer: The developers of this repository reserve the right to modify or update this disclaimer at any time without prior notice. It is the user's responsibility to periodically review the disclaimer to stay informed about any changes.
+
+ By using the large language model provided in this repository, you agree to accept and comply with the terms and conditions outlined in this disclaimer. If you do not agree with any part of this disclaimer, you should refrain from using the model and any content generated by it.
config.json DELETED
@@ -1,26 +0,0 @@
- {
-   "_name_or_path": "openlm-research/open_llama_3b_v2",
-   "architectures": [
-     "LlamaForCausalLM"
-   ],
-   "bos_token_id": 1,
-   "eos_token_id": 2,
-   "hidden_act": "silu",
-   "hidden_size": 3200,
-   "initializer_range": 0.02,
-   "intermediate_size": 8640,
-   "max_position_embeddings": 2048,
-   "model_type": "llama",
-   "num_attention_heads": 32,
-   "num_hidden_layers": 26,
-   "num_key_value_heads": 32,
-   "pad_token_id": 0,
-   "pretraining_tp": 1,
-   "rms_norm_eps": 1e-06,
-   "rope_scaling": null,
-   "tie_word_embeddings": false,
-   "torch_dtype": "float16",
-   "transformers_version": "4.31.0",
-   "use_cache": true,
-   "vocab_size": 32000
- }
generation_config.json DELETED
@@ -1,7 +0,0 @@
- {
-   "_from_model_config": true,
-   "bos_token_id": 1,
-   "eos_token_id": 2,
-   "pad_token_id": 0,
-   "transformers_version": "4.31.0"
- }
model.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:6b57e3250f0a1c3c5e01537fe3757426aa710ed5009295657321369667ef1c98
- size 6852980000
pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c471842d9e27297fb0a615c2005a6728f873af124ee3836abd850349ec2c4427
- size 6853034765
special_tokens_map.json DELETED
@@ -1,24 +0,0 @@
- {
-   "bos_token": {
-     "content": "<s>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": false
-   },
-   "eos_token": {
-     "content": "</s>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": false
-   },
-   "pad_token": "<unk>",
-   "unk_token": {
-     "content": "<unk>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": false
-   }
- }
tokenizer.model DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:91b289e85fa20fd375d8b33dc12f77616f18abc6359804471d1fafcb425fecb8
- size 511574
tokenizer_config.json DELETED
@@ -1,35 +0,0 @@
- {
-   "add_bos_token": true,
-   "add_eos_token": false,
-   "bos_token": {
-     "__type": "AddedToken",
-     "content": "<s>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": false
-   },
-   "clean_up_tokenization_spaces": false,
-   "eos_token": {
-     "__type": "AddedToken",
-     "content": "</s>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": false
-   },
-   "legacy": true,
-   "model_max_length": 2048,
-   "pad_token": null,
-   "padding_side": "left",
-   "sp_model_kwargs": {},
-   "tokenizer_class": "LlamaTokenizer",
-   "unk_token": {
-     "__type": "AddedToken",
-     "content": "<unk>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": false
-   }
- }