Spaces:
Runtime error
Runtime error
zetavg
committed on
Commit
•
62b53be
1
Parent(s):
91cd700
add sample templates data
Browse files- .gitignore +2 -0
- app.py +2 -2
- llama_lora/utils/data.py +21 -0
- templates/README.md +46 -0
- templates/alpaca.json +6 -0
- templates/alpaca_legacy.json +6 -0
- templates/alpaca_sample.json +7 -0
- templates/alpaca_short.json +6 -0
- templates/vigogne.json +6 -0
.gitignore
CHANGED
@@ -2,3 +2,5 @@ __pycache__/
|
|
2 |
.venv
|
3 |
/venv
|
4 |
.vscode
|
|
|
|
|
|
2 |
.venv
|
3 |
/venv
|
4 |
.vscode
|
5 |
+
|
6 |
+
/data
|
app.py
CHANGED
@@ -6,6 +6,7 @@ import gradio as gr
|
|
6 |
|
7 |
from llama_lora.globals import Global
|
8 |
from llama_lora.ui.main_page import main_page
|
|
|
9 |
|
10 |
|
11 |
def main(
|
@@ -28,13 +29,12 @@ def main(
|
|
28 |
|
29 |
Global.base_model = base_model
|
30 |
Global.data_dir = data_dir
|
31 |
-
Global.data_dir = data_dir
|
32 |
Global.load_8bit = load_8bit
|
33 |
|
34 |
Global.ui_show_sys_info = ui_show_sys_info
|
35 |
|
36 |
os.makedirs(data_dir, exist_ok=True)
|
37 |
-
|
38 |
|
39 |
with gr.Blocks(title=Global.ui_title) as demo:
|
40 |
main_page()
|
|
|
6 |
|
7 |
from llama_lora.globals import Global
|
8 |
from llama_lora.ui.main_page import main_page
|
9 |
+
from llama_lora.utils.data import init_data_dir
|
10 |
|
11 |
|
12 |
def main(
|
|
|
29 |
|
30 |
Global.base_model = base_model
|
31 |
Global.data_dir = data_dir
|
|
|
32 |
Global.load_8bit = load_8bit
|
33 |
|
34 |
Global.ui_show_sys_info = ui_show_sys_info
|
35 |
|
36 |
os.makedirs(data_dir, exist_ok=True)
|
37 |
+
init_data_dir()
|
38 |
|
39 |
with gr.Blocks(title=Global.ui_title) as demo:
|
40 |
main_page()
|
llama_lora/utils/data.py
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import shutil
|
3 |
+
|
4 |
+
from ..globals import Global
|
5 |
+
|
6 |
+
|
7 |
+
def init_data_dir():
    """Seed the configured data directory with the sample data bundled in the project.

    Resolves the project root (two levels up from this module) and copies the
    bundled ``templates`` directory into ``Global.data_dir`` if it is not
    already present there.
    """
    # This module lives at <project>/llama_lora/utils/, so the project root
    # is two directories above this file.
    module_dir = os.path.dirname(os.path.abspath(__file__))
    project_root = os.path.abspath(
        os.path.join(module_dir, os.pardir, os.pardir))
    copy_sample_data_if_not_exists(
        os.path.join(project_root, "templates"),
        os.path.join(Global.data_dir, "templates"))
|
14 |
+
|
15 |
+
|
16 |
+
def copy_sample_data_if_not_exists(source, destination):
    """Copy the *source* tree to *destination* unless *destination* already exists.

    Acts as a one-time seeding step: if anything is already present at the
    destination path, the function is a no-op so user data is never clobbered.
    """
    if not os.path.exists(destination):
        print(f"Copying sample data to \"{destination}\"")
        shutil.copytree(source, destination)
|
templates/README.md
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Prompt templates
|
2 |
+
|
3 |
+
This directory contains template styles for the prompts used to finetune LoRA models.
|
4 |
+
|
5 |
+
## Format
|
6 |
+
|
7 |
+
A template is described via a JSON file with the following keys:
|
8 |
+
|
9 |
+
- `prompt_input`: The template to use when input is not None. Uses `{instruction}` and `{input}` placeholders.
|
10 |
+
- `prompt_no_input`: The template to use when input is None. Uses the `{instruction}` placeholder.
|
11 |
+
- `description`: A short description of the template, with possible use cases.
|
12 |
+
- `response_split`: The text to use as separator when cutting real response from the model output.
|
13 |
+
|
14 |
+
No `{response}` placeholder was used, since the response is always the last element of the template and is just to be concatenated to the rest.
|
15 |
+
|
16 |
+
## Example template
|
17 |
+
|
18 |
+
The default template, used unless otherwise specified, is `alpaca.json`
|
19 |
+
|
20 |
+
```json
|
21 |
+
{
|
22 |
+
"description": "Template used by Alpaca-LoRA.",
|
23 |
+
"prompt_input": "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n",
|
24 |
+
"prompt_no_input": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Response:\n",
|
25 |
+
"response_split": "### Response:"
|
26 |
+
}
|
27 |
+
|
28 |
+
```
|
29 |
+
|
30 |
+
## Current templates
|
31 |
+
|
32 |
+
### alpaca
|
33 |
+
|
34 |
+
Default template used for generic LoRA fine tunes so far.
|
35 |
+
|
36 |
+
### alpaca_legacy
|
37 |
+
|
38 |
+
Legacy template used by the original alpaca repo, with no `\n` after the response field. Kept for reference and experiments.
|
39 |
+
|
40 |
+
### alpaca_short
|
41 |
+
|
42 |
+
A trimmed-down alpaca template which seems to perform just as well and spare some tokens. Models created with the default template seem to be queryable by the short template as well. More experiments are welcome.
|
43 |
+
|
44 |
+
### vigogne
|
45 |
+
|
46 |
+
The default alpaca template, translated to French. This template was used to train the "Vigogne" LoRA and is to be used to query it, or for extra fine tuning.
|
templates/alpaca.json
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"description": "Template used by Alpaca-LoRA.",
|
3 |
+
"prompt_input": "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n",
|
4 |
+
"prompt_no_input": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Response:\n",
|
5 |
+
"response_split": "### Response:"
|
6 |
+
}
|
templates/alpaca_legacy.json
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"description": "Legacy template, used by Original Alpaca repository.",
|
3 |
+
"prompt_input": "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:",
|
4 |
+
"prompt_no_input": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Response:",
|
5 |
+
"response_split": "### Response:"
|
6 |
+
}
|
templates/alpaca_sample.json
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"description": "Sample Alpaca-LoRA template which declares the variables used.",
|
3 |
+
"variables": ["instruction", "input"],
|
4 |
+
"prompt_with_instruction_input": "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n",
|
5 |
+
"prompt_with_instruction": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Response:\n",
|
6 |
+
"response_split": "### Response:"
|
7 |
+
}
|
templates/alpaca_short.json
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"description": "A shorter template to experiment with.",
|
3 |
+
"prompt_input": "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n",
|
4 |
+
"prompt_no_input": "### Instruction:\n{instruction}\n\n### Response:\n",
|
5 |
+
"response_split": "### Response:"
|
6 |
+
}
|
templates/vigogne.json
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"description": "French template, used by Vigogne for finetuning.",
|
3 |
+
"prompt_input": "Ci-dessous se trouve une instruction qui décrit une tâche, associée à une entrée qui fournit un contexte supplémentaire. Écrivez une réponse qui complète correctement la demande.\n\n### Instruction:\n{instruction}\n\n### Entrée:\n{input}\n\n### Réponse:\n",
|
4 |
+
"prompt_no_input": "Ci-dessous se trouve une instruction qui décrit une tâche. Écrivez une réponse qui complète correctement la demande.\n\n### Instruction:\n{instruction}\n\n### Réponse:\n",
|
5 |
+
"response_split": "### Réponse:"
|
6 |
+
}
|