Update base_model formatting
README.md CHANGED
@@ -1,11 +1,13 @@
 ---
-base_model: https://huggingface.co/codellama/CodeLlama-34b-python-hf
-inference: false
 language:
 - code
 license: llama2
-model_creator: Meta
+tags:
+- llama-2
 model_name: CodeLlama 34B Python
+base_model: codellama/CodeLlama-34b-python-hf
+inference: false
+model_creator: Meta
 model_type: llama
 pipeline_tag: text-generation
 prompt_template: '[INST] Write code to solve the following coding problem that obeys
@@ -18,8 +20,6 @@ prompt_template: '[INST] Write code to solve the following coding problem that o

 '
 quantized_by: TheBloke
-tags:
-- llama-2
 ---

 <!-- header start -->
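In effect, the commit changes `base_model` from a full Hub URL to a bare `namespace/model` repo id (`codellama/CodeLlama-34b-python-hf`) and reorders the metadata keys. A minimal sketch of that normalization is below; the helper name and standalone script are illustrative, not part of the repo, and it assumes the only change to the value is dropping the `https://huggingface.co/` prefix:

```python
# Sketch: normalize a base_model value from a full Hub URL to a bare repo id,
# mirroring the change shown in the diff above (helper name is illustrative).
HUB_PREFIX = "https://huggingface.co/"

def normalize_base_model(value: str) -> str:
    """Strip the Hub URL prefix so only 'namespace/model' remains."""
    if value.startswith(HUB_PREFIX):
        value = value[len(HUB_PREFIX):]
    return value.strip("/")

# Example taken from the diff:
old_value = "https://huggingface.co/codellama/CodeLlama-34b-python-hf"
print(normalize_base_model(old_value))  # -> codellama/CodeLlama-34b-python-hf
```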