import os

from langchain_community.llms import LlamaCpp
from langchain_openai import ChatOpenAI

from modules.config.constants import LLAMA_PATH


class ChatModelLoader:
    """Loads the chat model named in the config: an OpenAI-hosted model or a local llama.cpp model."""

    def __init__(self, config):
        self.config = config
        # Read the Hugging Face token from the environment (used for gated models).
        self.huggingface_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")

    def load_chat_model(self):
        llm_loader = self.config["llm_params"]["llm_loader"]
        if llm_loader in ["gpt-3.5-turbo-1106", "gpt-4"]:
            # OpenAI-hosted chat model; expects OPENAI_API_KEY in the environment.
            llm = ChatOpenAI(model_name=llm_loader)
        elif llm_loader == "local_llm":
            n_batch = 512  # Should be between 1 and n_ctx; consider the amount of VRAM on your GPU.
            llm = LlamaCpp(
                model_path=LLAMA_PATH,
                n_batch=n_batch,
                n_ctx=2048,  # Context window for the local model.
                f16_kv=True,  # Half-precision key/value cache; required by some models.
                verbose=True,
                n_threads=2,
                temperature=self.config["llm_params"]["local_llm_params"]["temperature"],
            )
        else:
            raise ValueError(f"Invalid LLM Loader: {llm_loader}")
        return llm
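

# A minimal usage sketch, not part of the original module. The config shape
# below is an assumption inferred from the lookups in load_chat_model; adjust
# it to match the project's actual config file.
if __name__ == "__main__":
    example_config = {
        "llm_params": {
            "llm_loader": "gpt-3.5-turbo-1106",  # or "gpt-4" / "local_llm"
            "local_llm_params": {"temperature": 0.7},
        }
    }
    loader = ChatModelLoader(example_config)
    llm = loader.load_chat_model()
    # Both ChatOpenAI and LlamaCpp expose the LangChain Runnable interface.
    print(llm.invoke("Hello!"))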