File size: 1,321 Bytes
815128e
 
 
910c4c8
99d65c0
 
 
 
 
 
 
815128e
910c4c8
815128e
6011708
 
 
815128e
 
 
6011708
 
 
910c4c8
 
 
3ca5bd8
 
 
068c8a2
 
 
 
 
a467fc0
068c8a2
ea50055
 
 
815128e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
99d65c0
 
 
fa9866a
99d65c0
 
 
 
fa9866a
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
# Run the application entry point (app.py) in the foreground.
.PHONY: start
start:
	python app.py

# Serve the app via langchain-serve (lcserve) locally.
# JINA_HIDE_SURVEY=1 suppresses the Jina survey prompt; TRANSFORMERS_OFFLINE=1
# keeps the transformers library from reaching the network at startup.
# Usage: `make serve` (default port) or `make serve PORT=8080`.
.PHONY: serve
serve:
ifeq ("$(PORT)", "")
	JINA_HIDE_SURVEY=1 TRANSFORMERS_OFFLINE=1 python -m lcserve deploy local server
else
	JINA_HIDE_SURVEY=1 TRANSFORMERS_OFFLINE=1 python -m lcserve deploy local server --port=$(PORT)
endif

# Run the test script. .PHONY is required: a file named `test` in the
# working tree would otherwise make this target appear "up to date".
.PHONY: test
test:
	python test.py

# Run server.py directly (secondary test entry point).
.PHONY: test2
test2:
	python server.py

# Run test.py in chat mode (passes `chat` as the CLI argument).
.PHONY: chat
chat:
	python test.py chat

# Run unit_test.py in chat mode (passes `chat` as the CLI argument).
.PHONY: chat2
chat2:
	python unit_test.py chat

# Run unit_test.py, forwarding $(TEST) as its argument to select a test,
# e.g. `make unittest TEST=some_case`. Empty $(TEST) runs the default.
.PHONY: unittest
unittest:
	python unit_test.py $(TEST)

# Start the Telegram bot front end.
.PHONY: tele
tele:
	python telegram_bot.py

# Start an OpenLLM server hosting Llama-2-7b-chat.
# Usage: `make openllm` (default port) or `make openllm PORT=3000`.
.PHONY: openllm
openllm:
ifeq ("$(PORT)", "")
	openllm start llama --model-id meta-llama/Llama-2-7b-chat-hf
else
	openllm start llama --model-id meta-llama/Llama-2-7b-chat-hf --port=$(PORT)
endif

# Start the OpenLLM server forced onto CPU by hiding all CUDA devices.
.PHONY: openllm-cpu
openllm-cpu:
	CUDA_VISIBLE_DEVICES="" openllm start llama --model-id meta-llama/Llama-2-7b-chat-hf

# Run the document-ingestion script (ingest.py).
.PHONY: ingest
ingest:
	python ingest.py

# Show the current memlock resource limit and print the command to raise it.
# `$$$$` collapses to `$$` after make's own expansion; inside the single
# quotes the shell then prints a literal `$$` as a placeholder for the PID.
.PHONY: mlock
mlock:
	@echo 'To set new value for mlock, please run: sudo prlimit --memlock=35413752832:35413752832 --pid $$$$'
	prlimit --memlock

# Auto-format the codebase: isort orders imports, black reformats code.
.PHONY: format
format:
	isort .
	black .

# Install/upgrade core dependencies, then show the installed versions of
# the key packages as a quick sanity check.
.PHONY: install
install:
	pip install -U -r requirements.txt
	pip show langchain transformers

# Install optional native-extension dependencies (Linux), forcing gcc/g++ 11
# so llama-cpp-python and ctransformers compile with a known toolchain.
.PHONY: install-extra
install-extra:
	CXX=g++-11  CC=gcc-11 pip install -U -r requirements_extra.txt
	pip show llama-cpp-python ctransformers

# Install optional native-extension dependencies on macOS using Homebrew's
# LLVM clang (Apple's clang lacks the needed OpenMP support).
.PHONY: install-extra-mac
install-extra-mac:
	# brew install llvm libomp
	CXX=/usr/local/opt/llvm/bin/clang++ CC=/usr/local/opt/llvm/bin/clang pip install -U -r requirements_extra.txt
	pip show llama-cpp-python ctransformers