Jae-Won Chung committed
Commit: 36fdd36
Parent(s): d49d71b

Add Dockerfile and fix requirements.txt typo

Files changed:
- Dockerfile        +38 -0
- README.md          +6 -2
- benchmark.py       +2 -0
- requirements.txt   +2 -4
Dockerfile ADDED
@@ -0,0 +1,38 @@
+FROM nvidia/cuda:11.7.1-devel-ubuntu20.04
+
+WORKDIR /workspace
+
+# Basic installs
+ARG DEBIAN_FRONTEND=noninteractive
+ENV TZ='America/Detroit'
+RUN apt-get update -qq \
+    && apt-get -y --no-install-recommends install \
+       build-essential software-properties-common wget git tar rsync \
+    && apt-get clean all \
+    && rm -r /var/lib/apt/lists/*
+
+# Install Miniconda3 (py39_23.3.1-0)
+ENV PATH="/root/.local/miniconda3/bin:$PATH"
+RUN mkdir -p /root/.local \
+    && wget https://repo.anaconda.com/miniconda/Miniconda3-py39_23.3.1-0-Linux-x86_64.sh \
+    && mkdir /root/.conda \
+    && bash Miniconda3-py39_23.3.1-0-Linux-x86_64.sh -b -p /root/.local/miniconda3 \
+    && rm -f Miniconda3-py39_23.3.1-0-Linux-x86_64.sh \
+    && ln -sf /root/.local/miniconda3/etc/profile.d/conda.sh /etc/profile.d/conda.sh
+
+# Install PyTorch
+RUN pip install torch==2.0.1
+
+# Install the HEAD commit of Zeus (for ZeusMonitor)
+RUN git clone https://github.com/SymbioticLab/Zeus.git zeus \
+    && cd zeus \
+    && pip install -e . \
+    && cd ..
+
+# Install requirements for benchmarking
+ADD . /workspace/leaderboard
+RUN cd leaderboard \
+    && pip install -r requirements.txt \
+    && cd ..
+
+ENV TRANSFORMERS_CACHE=/data/leaderboard/hfcache
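The image installs the HEAD commit of Zeus specifically for `ZeusMonitor`, which the benchmark uses to measure GPU time and energy; the `--cap-add SYS_ADMIN` flag in the README's `docker run` command below is likely what allows those power/energy readings from inside the container. As a rough sketch of how the measurement API is typically used (the exact call sites in `benchmark.py` may differ; the window name and GPU index are illustrative):

```python
# Minimal sketch of ZeusMonitor usage, based on the zeus.monitor API from the
# SymbioticLab/Zeus repository; window name and GPU index are illustrative.
from zeus.monitor import ZeusMonitor

monitor = ZeusMonitor(gpu_indices=[0])  # measure GPU 0 only

monitor.begin_window("generate")
# ... run model inference here ...
measurement = monitor.end_window("generate")

print(f"Time (s):   {measurement.time}")
print(f"Energy (J): {measurement.total_energy}")
```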
README.md CHANGED
@@ -24,6 +24,10 @@ export TRANSFORMERS_CACHE=/data/leaderboard/hfcache
 Run benchmarks like this:
 
 ```console
-$
-$
+$ docker build -t leaderboard:latest .
+$ docker run -it --name jw-leaderboard --gpus all --cap-add SYS_ADMIN -v /data/leaderboard:/data/leaderboard -v $HOME/workspace/leaderboard:/workspace/leaderboard leaderboard:latest bash
+
+# cd leaderboard
+# python benchmark.py --model-path /data/leaderboard/weights/lmsys/vicuna-7B --input-file /data/leaderboard/sharegpt/sg_90k_part1_html_cleaned_lang_first_sampled.json
+# python benchmark.py --model-path databricks/dolly-v2-12b --input-file /data/leaderboard/sharegpt/sg_90k_part1_html_cleaned_lang_first_sampled.json
 ```
benchmark.py CHANGED
@@ -70,6 +70,8 @@ def main(
         raise ValueError("ChatGLM is not supported.")
 
     # Print out what we're about to do.
+    if model_path.endswith("/"):
+        model_path = model_path[:-1]
     model_name_cleaned = "--".join(model_path.split("/")[-2:])
     output_dir = f"data/{task}/{model_name_cleaned}"
    output_csv_path = f"{output_dir}/benchmark.json"
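The two added lines guard against a trailing slash in `--model-path`: without them, the final path component is empty and `model_name_cleaned` (and therefore the output directory) ends in a dangling `--`. A quick illustration, using a trailing-slash variant of the README's example path:

```python
# Why the trailing slash is stripped; the path below is just an example.
model_path = "/data/leaderboard/weights/lmsys/vicuna-7B/"

# Without the fix, the trailing slash leaves an empty last component.
print("--".join(model_path.split("/")[-2:]))  # -> "vicuna-7B--"

# With the fix from this commit applied:
if model_path.endswith("/"):
    model_path = model_path[:-1]
print("--".join(model_path.split("/")[-2:]))  # -> "lmsys--vicuna-7B"
```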
requirements.txt CHANGED
@@ -1,7 +1,5 @@
-zeus-ml
-
+zeus-ml
+fschat==0.2.14
 rwkv==0.7.5
 einops
 tyro
-plotpy
-gradio