# NOTE: removed web-scrape residue (HF Spaces page header, build status, file
# size, commit hashes, line-number gutter) — it was not part of the Dockerfile
# and would fail to parse.
# Workaround for Docker Desktop on Windows/WSL running out of VHD space:
# navigate to your user folder (cd $env:USERPROFILE\AppData\Local\Docker\wsl\data)
# and run `Resize-VHD -Path .\ext4.vhdx -SizeBytes 300GB`; after that,
# building with docker-compose can continue.
# --- Build stage: compile llama.cpp and produce a 4-bit quantized LLaMA-7B ---
# NOTE(review): prefer a pinned tag (e.g. python:3.11-slim-bookworm) over
# :latest for reproducible builds — confirm which Python version llama.cpp's
# convert.py / requirements.txt supports before pinning.
FROM python:latest AS builder

# Build toolchain for llama.cpp plus git-lfs to fetch the model weights.
# update+install combined in one layer so the package index is never stale
# (hadolint DL3009); apt lists removed in the same layer to keep it small.
RUN apt-get update -y \
 && apt-get install -y --no-install-recommends \
        g++ \
        gcc \
        git \
        git-lfs \
        libgmp-dev \
        libmpc-dev \
        libmpfr-dev \
        make \
 && rm -rf /var/lib/apt/lists/* \
 && git lfs install

# Fetch and build llama.cpp (also builds the `quantize` tool used below).
RUN git clone https://github.com/ggerganov/llama.cpp
WORKDIR /llama.cpp
RUN make

# Pull the raw LLaMA-7B weights (via git-lfs) straight into llama.cpp's
# models directory — replaces the original clone-to-/ followed by `cp -r`.
RUN git clone https://huggingface.co/nyanko7/LLaMA-7B ./models/LLaMA-7B

# Convert the 7B model to ggml FP16 format…
RUN python3 -m pip install --no-cache-dir -r requirements.txt
RUN python3 convert.py ./models/LLaMA-7B

# …then quantize it to 4 bits (q4_0). The output path /llama.cpp/models/7B/
# is the contract consumed by the runtime stage's COPY --from=builder.
RUN mkdir -p ./models/7B \
 && ./quantize ./models/LLaMA-7B/ggml-model-f16.bin ./models/7B/ggml-model-q4_0.bin q4_0
# --- Runtime stage: Streamlit app served with the quantized model ---
FROM tensorflow/tensorflow:2.11.0-gpu

WORKDIR /app

# Only the quantized 4-bit model crosses over from the build stage; the
# compiler toolchain and raw FP16 weights are left behind.
# (Fixed the stray double slash in the source path.)
COPY --from=builder /llama.cpp/models/7B/ ./mymodels/LLaMA-7B

# Runtime OS dependencies in a single layer: update+install combined
# (stale-index safe), no recommends, apt lists cleaned in the same layer.
RUN apt-get update -y \
 && apt-get install -y --no-install-recommends \
        autoconf \
        bison \
        flex \
        g++ \
        gcc \
        gettext \
        git \
        git-lfs \
        lhasa \
        libgmp-dev \
        libmpc-dev \
        libmpfr-dev \
        make \
        ncurses-dev \
        rsync \
        texinfo \
        wget \
 && rm -rf /var/lib/apt/lists/*

# Python deps before app source so the (slow) pip layer is cached until
# requirements.txt itself changes.
COPY ./requirements.txt requirements.txt
RUN pip install --no-cache-dir -r requirements.txt
COPY ./app .

# NOTE(review): never bake real credentials into the image via ENV/ARG — they
# persist in `docker history`. Supply OPENAI_API_KEY at runtime (Space secret
# or `docker run -e …`); the empty default and placeholder file are kept only
# so the app can start without it.
ENV OPENAI_API_KEY=""
RUN echo "" > .openaiapikey

# Writable directories for caches / NLTK data / the vector store.
# World-writable (777) because HF Spaces runs the container under an
# arbitrary non-root UID — TODO confirm, and narrow to a dedicated
# USER + --chown if the platform permits it.
RUN mkdir -p /.cache /nltk_data /VectorStore /app/.cache

# Build-time warm-up (presumably downloads NLTK data / builds indexes into
# the dirs above — NOTE(review): confirm run.py is meant to execute during
# the build; any failure here aborts the image build). It must run before
# the chmod below, which also covers /app/VectorStore/ that may only exist
# after this step.
RUN python run.py

RUN chmod 777 /.cache /nltk_data /VectorStore /app/.cache /app/VectorStore/

# Exec-form CMD so streamlit is PID 1 and receives SIGTERM from `docker stop`.
CMD ["streamlit", "run", "app.py", "--server.port=7860", "--server.headless=true"]

# Documentation only (does not publish the port): streamlit listens on 7860.
EXPOSE 7860