# unit_test.py
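# Smoke tests for the LLM loader, chat chain and QA chain across several
# backends (openai, llamacpp, gpt4all-j, huggingface, hftgi).
#
# Usage:
#   python unit_test.py          # run the unittest suite
#   python unit_test.py chat     # start an interactive chat loop
#
# CHAT_QUESTION and QA_QUESTION environment variables supply the test prompts.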
import os
import sys
import unittest
from timeit import default_timer as timer
from typing import Any

from langchain.schema import HumanMessage

from app_modules.init import app_init
from app_modules.llm_chat_chain import ChatChain
from app_modules.llm_loader import LLMLoader
from app_modules.utils import get_device_types, print_llm_response


class TestLLMLoader(unittest.TestCase):
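    """Loads each backend with LLMLoader and runs a single direct llm() call."""
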
    question = os.environ.get("CHAT_QUESTION")

    def run_test_case(self, llm_model_type, query):
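        # NUMBER_OF_CPU_CORES sets the CPU thread count (defaults to 4).
        # The n_threds spelling matches LLMLoader.init's keyword argument.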
n_threds = int(os.environ.get("NUMBER_OF_CPU_CORES") or "4")
hf_embeddings_device_type, hf_pipeline_device_type = get_device_types()
print(f"hf_embeddings_device_type: {hf_embeddings_device_type}")
print(f"hf_pipeline_device_type: {hf_pipeline_device_type}")
llm_loader = LLMLoader(llm_model_type)
start = timer()
llm_loader.init(
n_threds=n_threds, hf_pipeline_device_type=hf_pipeline_device_type
)
end = timer()
print(f"Model loaded in {end - start:.3f}s")
result = llm_loader.llm(
[HumanMessage(content=query)] if llm_model_type == "openai" else query
)
end2 = timer()
print(f"Inference completed in {end2 - end:.3f}s")
print(result)

    def test_openai(self):
        self.run_test_case("openai", self.question)

    def test_llamacpp(self):
        self.run_test_case("llamacpp", self.question)

    def test_gpt4all_j(self):
        self.run_test_case("gpt4all-j", self.question)

    def test_huggingface(self):
        self.run_test_case("huggingface", self.question)

    def test_hftgi(self):
        self.run_test_case("hftgi", self.question)


class TestChatChain(unittest.TestCase):
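    """Runs a two-turn conversation through ChatChain for each backend."""
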
    question = os.environ.get("CHAT_QUESTION")

    def run_test_case(self, llm_model_type, query):
n_threds = int(os.environ.get("NUMBER_OF_CPU_CORES") or "4")
hf_embeddings_device_type, hf_pipeline_device_type = get_device_types()
print(f"hf_embeddings_device_type: {hf_embeddings_device_type}")
print(f"hf_pipeline_device_type: {hf_pipeline_device_type}")
llm_loader = LLMLoader(llm_model_type)
start = timer()
llm_loader.init(
n_threds=n_threds, hf_pipeline_device_type=hf_pipeline_device_type
)
chat = ChatChain(llm_loader)
end = timer()
print(f"Model loaded in {end - start:.3f}s")
inputs = {"question": query}
result = chat.call_chain(inputs, None)
end2 = timer()
print(f"Inference completed in {end2 - end:.3f}s")
print(result)
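        # Second turn: a context-dependent follow-up that exercises the
        # chain's conversational memory.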
inputs = {"question": "how many people?"}
result = chat.call_chain(inputs, None)
end3 = timer()
print(f"Inference completed in {end3 - end2:.3f}s")
print(result)

    def test_openai(self):
        self.run_test_case("openai", self.question)

    def test_llamacpp(self):
        self.run_test_case("llamacpp", self.question)

    def test_gpt4all_j(self):
        self.run_test_case("gpt4all-j", self.question)

    def test_huggingface(self):
        self.run_test_case("huggingface", self.question)

    def test_hftgi(self):
        self.run_test_case("hftgi", self.question)


class TestQAChain(unittest.TestCase):
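    """Initializes the full app via app_init() and runs a two-turn QA exchange."""
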
    qa_chain: Any
    question = os.environ.get("QA_QUESTION")

    def run_test_case(self, llm_model_type, query):
start = timer()
os.environ["LLM_MODEL_TYPE"] = llm_model_type
qa_chain = app_init()[1]
end = timer()
print(f"App initialized in {end - start:.3f}s")
chat_history = []
inputs = {"question": query, "chat_history": chat_history}
result = qa_chain.call_chain(inputs, None)
end2 = timer()
print(f"Inference completed in {end2 - end:.3f}s")
print_llm_response(result)
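        # Carry the first answer forward as chat history for a follow-up turn.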
chat_history.append((query, result["answer"]))
inputs = {"question": "tell me more", "chat_history": chat_history}
result = qa_chain.call_chain(inputs, None)
end3 = timer()
print(f"Inference completed in {end3 - end2:.3f}s")
print_llm_response(result)

    def test_openai(self):
        self.run_test_case("openai", self.question)

    def test_llamacpp(self):
        self.run_test_case("llamacpp", self.question)

    def test_gpt4all_j(self):
        self.run_test_case("gpt4all-j", self.question)

    def test_huggingface(self):
        self.run_test_case("huggingface", self.question)

    def test_hftgi(self):
        self.run_test_case("hftgi", self.question)


def chat():
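    """Interactive chat loop against ChatChain; type "exit" to quit."""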
start = timer()
llm_loader = app_init()[0]
end = timer()
print(f"Model loaded in {end - start:.3f}s")
chat_chain = ChatChain(llm_loader)
chat_history = []
chat_start = timer()
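
    # Read questions until the user types "exit".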
while True:
query = input("Please enter your question: ")
query = query.strip()
if query.lower() == "exit":
break
print("\nQuestion: " + query)
start = timer()
result = chat_chain.call_chain(
{"question": query, "chat_history": chat_history}, None
)
end = timer()
print(f"Completed in {end - start:.3f}s")
chat_history.append((query, result["response"]))
chat_end = timer()
print(f"Total time used: {chat_end - chat_start:.3f}s")


if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "chat":
chat()
else:
unittest.main()