File size: 3,939 Bytes
a9dfa06
562ef99
530fde7
c892105
530fde7
 
 
c892105
530fde7
 
 
 
 
 
 
 
ff68c49
530fde7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e9c67a8
 
 
530fde7
 
 
 
 
ff68c49
 
 
 
 
530fde7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
import os
import shutil
import subprocess
import sys
import uuid

import streamlit as st
from git import Repo
import huggingface_hub

# Repository holding the conversion backend; cloned once per deployment.
BACKEND_REPO_URL = "https://github.com/vodkaslime/ctranslate2-converter-backend"

# The backend is cloned under the user's home directory; its main.py is the
# conversion entry point invoked via subprocess.
HOME_DIR = os.path.expanduser("~")
BACKEND_DIR = os.path.join(HOME_DIR, "backend")
BACKEND_SCRIPT = os.path.join(BACKEND_DIR, "main.py")

# Each conversion job writes into a fresh uuid-named subdirectory under here.
MODEL_ROOT_DIR = os.path.join(HOME_DIR, "models")

st.title(":wave: Tabby Model Converter")


@st.cache_resource
def init():
    """Clone the converter backend repo and install its requirements.

    Cached per Streamlit session so the setup runs only once. If the
    backend directory already exists it is assumed to be a previous
    successful setup and nothing is done. On failure, the (possibly
    partial) clone is removed so a later run can retry, and the error
    is surfaced in the UI instead of raised.
    """
    if os.path.exists(BACKEND_DIR):
        return

    try:
        Repo.clone_from(BACKEND_REPO_URL, BACKEND_DIR)
        # Invoke pip through the current interpreter so the backend's
        # requirements land in the same environment that will later run
        # BACKEND_SCRIPT (a bare "pip" may belong to a different Python).
        subprocess.check_call(
            [
                sys.executable,
                "-m",
                "pip",
                "install",
                "-r",
                os.path.join(BACKEND_DIR, "requirements.txt"),
            ]
        )
    except Exception as e:
        # The clone may have failed before the directory was created;
        # ignore_errors prevents rmtree's own FileNotFoundError from
        # masking the original failure.
        shutil.rmtree(BACKEND_DIR, ignore_errors=True)
        st.error(f"error initializing backend: {e}")


def convert_and_upload_model(
    model,
    output_dir,
    inference_mode,
    prompt_template,
    huggingface_token,
    upload_mode,
    new_model,
):
    """Convert a Hugging Face model with the backend script and upload it.

    Parameters:
        model: source HF repo id (e.g. "Salesforce/codet5p-220m").
        output_dir: scratch directory the backend writes the converted
            model into; caller is responsible for removing it afterwards.
        inference_mode: "causallm" or "seq2seq", forwarded to the backend.
        prompt_template: optional prompt template forwarded when non-empty.
        huggingface_token: writable HF token used for login and upload.
        upload_mode: "new repo" to create `new_model` first, otherwise a
            commit is pushed to the existing repo.
        new_model: destination HF repo id for the converted model.

    All outcomes (validation failures, conversion or upload errors,
    success) are reported through the Streamlit UI; nothing is returned.
    """
    # Verify parameters
    if not model:
        st.error("Must provide a model name")
        return

    if not new_model:
        st.error("Must provide a new model name where the conversion will upload to")
        return

    if not huggingface_token:
        st.error("Must provide a huggingface token")
        return

    # Run the backend with the same interpreter serving this app so it
    # sees the packages installed by init().
    command = [sys.executable, BACKEND_SCRIPT]
    command += ["--model", model]
    command += ["--output_dir", output_dir]
    command += ["--inference_mode", inference_mode]
    if prompt_template:
        command += ["--prompt_template", prompt_template]

    # Login on behalf of user
    huggingface_hub.login(huggingface_token)

    # Handle model conversion
    try:
        with st.spinner("Converting model"):
            subprocess.check_call(command)
    except subprocess.CalledProcessError as e:
        st.error(f"Error converting model to Tabby compatible format: {e}")
        link = f"https://huggingface.co/{model}"
        st.warning(
            f"Note: do you have access to the model? If not, visit the model page at {link} and request for access"
        )
        return

    st.success("Model successfully converted")

    # Handle model upload
    try:
        with st.spinner("Uploading converted model"):
            api = huggingface_hub.HfApi()
            if upload_mode == "new repo":
                # exist_ok avoids failing when the repo was already
                # created by a previous partially-successful run.
                api.create_repo(new_model, exist_ok=True)
            api.upload_folder(folder_path=output_dir, repo_id=new_model)

    except Exception as e:
        st.error(f"Error uploading model: {e}")
        return

    st.success("Model successfully uploaded.")


def clean_up(output_dir):
    """Remove the per-job working directory, reporting progress in the UI.

    Shows an error and stops (rather than claiming success) when the
    directory cannot be removed.
    """
    try:
        with st.spinner("Cleaning up"):
            shutil.rmtree(output_dir)
    except Exception as e:
        st.error(f"Error removing work dir: {e}")
        # Bug fix: previously the success banner was shown even after an
        # error was displayed.
        return
    st.success("Cleaning up finished")


init()

# --- UI inputs -------------------------------------------------------------
model = st.text_input("Model name", placeholder="Salesforce/codet5p-220m")
inference_mode = st.radio(
    "Inference mode",
    ("causallm", "seq2seq"),
)
prompt_template = st.text_input("Prompt template")
huggingface_token = st.text_input(
    "Hugging face token (must be writable token)", type="password"
)
upload_mode = st.radio(
    "Choose if you want to create a new model repo or push a commit to existing repo",
    ("new repo", "existing repo"),
)
new_model = st.text_input(
    "The new model name that the model is going to be converted to",
    placeholder="TabbyML/T5P-220M",
)
convert_button = st.button("Convert model", use_container_width=True)

if convert_button:
    # A fresh uuid-named work dir per job keeps concurrent sessions from
    # clobbering each other. (Renamed from `id`, which shadowed the builtin.)
    job_id = uuid.uuid4()
    output_dir = os.path.join(MODEL_ROOT_DIR, str(job_id))

    # Try converting and uploading model
    convert_and_upload_model(
        model,
        output_dir,
        inference_mode,
        prompt_template,
        huggingface_token,
        upload_mode,
        new_model,
    )

    # Clean up the work dir even if conversion/upload failed partway.
    clean_up(output_dir)